/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    struct TranslationBlock *tb;
    const DisasInsn *insn;
    DisasFields *fields;
    uint64_t ex_value;
    uint64_t pc, next_pc;
    uint32_t ilen;
    enum cc_op cc_op;
    bool singlestep_enabled;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
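
/* The g1/g2 flags mark operands that alias global TCG values (such as
   cc_src/cc_dst or the register file); free_compare() must not free
   those.  is_64 selects between the s64 and s32 views of the union.  */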

/* is_jmp field values */
#define DISAS_EXCP DISAS_TARGET_0

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

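/* Compute the value saved by branch-and-link style instructions.  When
   running with 31-bit addresses, the link information carries the
   addressing-mode bit in bit position 32 (0x80000000).  */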
static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
{
    if (!(s->tb->flags & FLAG_MASK_64)) {
        if (s->tb->flags & FLAG_MASK_32) {
            return pc | 0x80000000;
        }
    }
    return pc;
}

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];

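/* Register the TCG globals that mirror architectural state.  Note that
   the floating point registers overlay the first doubleword of the
   corresponding vector registers (vregs[i][0].d).  */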
void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
        fregs[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUS390XState, vregs[i][0].d),
                                      cpu_reg_names[i + 16]);
    }
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_shri_i64(r, fregs[reg], 32);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(fregs[reg], v);
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->pc);
}

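/* PER (program-event recording) support: gbea tracks the breaking-event
   address, and when PER is enabled in the TB flags, taken branches are
   reported to the per_branch helper.  */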
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->pc);

    if (s->tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->pc);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->pc);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->pc);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}

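/* Select the MMU index for data accesses from the translated PSW bits:
   with DAT disabled we use the real-address index, otherwise the
   primary/secondary/home address-space index encoded in the TB flags.  */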
static int get_mem_index(DisasContext *s)
{
    if (!(s->tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_trap(DisasContext *s)
{
    TCGv_i32 t;

    /* Set DXC to 0xff.  */
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_ori_i32(t, t, 0xff00);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_temp_free_i32(t);

    gen_program_exception(s, PGM_DATA);
}

#ifndef CONFIG_USER_ONLY
static void check_privileged(DisasContext *s)
{
    if (s->tb->flags & FLAG_MASK_PSTATE) {
        gen_program_exception(s, PGM_PRIVILEGED);
    }
}
#endif

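/* Compute a base + index + displacement effective address into a new
   temporary, wrapping the result to 31 bits when not in 64-bit
   addressing mode.  */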
static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    bool need_31 = !(s->tb->flags & FLAG_MASK_64);

    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
       displacements early we create larger immediate addends.  */

    /* Note that addi optimizes the imm==0 case.  */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        tcg_gen_addi_i64(tmp, tmp, d2);
    } else if (b2) {
        tcg_gen_addi_i64(tmp, regs[b2], d2);
    } else if (x2) {
        tcg_gen_addi_i64(tmp, regs[x2], d2);
    } else {
        if (need_31) {
            d2 &= 0x7fffffff;
            need_31 = false;
        }
        tcg_gen_movi_i64(tmp, d2);
    }
    if (need_31) {
        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
    }

    return tmp;
}

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

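/* The condition code is tracked lazily: s->cc_op records how to derive
   it from cc_src/cc_dst/cc_vr.  gen_op_calc_cc() materialises the value
   into cc_op via the calc_cc helper, passing as many operands as the
   recorded operation needs.  */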
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

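/* Decide how a TB may end: use_exit_tb() forces a full exit when
   single-stepping, after an I/O instruction, or when PER is active;
   use_goto_tb() additionally restricts direct block chaining to targets
   on the same page as the TB (softmmu only).  */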
static bool use_exit_tb(DisasContext *s)
{
    return (s->singlestep_enabled ||
            (tb_cflags(s->tb) & CF_LAST_IO) ||
            (s->tb->flags & FLAG_MASK_PER));
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(use_exit_tb(s))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
           (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

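/* In a branch mask, bit 8 selects CC=0, 4 selects CC=1, 2 selects CC=2
   and 1 selects CC=3.  The tables below map each 4-bit mask to a TCG
   condition, treating CC values that the producing operation cannot
   yield as don't-cares.  */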
/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care.  */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
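/* insn-format.def is included twice as an X-macro list: here it expands
   to one FMT_* enumerator per format, and further below it expands again
   to fill format_info[] with the field layout of each format.  */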
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)

typedef enum {
#include "insn-format.def"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5

/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
{
    return (f->presentO >> c) & 1;
}

static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(f, o));
    return f->c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef R
#undef M
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;
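
/* As with DisasCompare, the g_* flags mark operands that alias global
   TCG values and therefore must not be freed after the operation.  */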

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We have updated the PC and CC values.  */
    EXIT_PC_CC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are exiting the TB to the main loop.  */
    EXIT_PC_STALE_NOCHAIN,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;

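/* One entry of the instruction decode table: the opcode, its format,
   the facility it depends on, operand constraints (SPEC_*), and the
   helper callbacks that load inputs, perform the operation, write the
   outputs and compute the condition code.  */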
struct DisasInsn {
    unsigned opc:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);
    ExitStatus (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static void help_l2_shift(DisasContext *s, DisasFields *f,
                          DisasOps *o, int mask)
{
    int b2 = get_field(f, b2);
    int d2 = get_field(f, d2);

    if (b2 == 0) {
        o->in2 = tcg_const_i64(d2 & mask);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, mask);
    }
}

static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->next_pc) {
        per_branch(s, true);
        return NO_EXIT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb((uintptr_t)s->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    }
}

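/* Emit a conditional branch to either an immediate target (a PC-relative
   halfword offset) or a register-supplied destination, preferring
   goto_tb chaining when both exits of the TB permit it.  */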
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}

static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}

static ExitStatus op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return NO_EXIT;
}

static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

static ExitStatus op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return NO_EXIT;
}

static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return NO_EXIT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

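/* BRANCH ON COUNT: decrement r1 (in its 32-bit, high-word or 64-bit
   form) and branch when the decremented value is non-zero.  */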
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}

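/* COMPARE AND BRANCH (RELATIVE): compare in1 with in2 and branch on the
   m3 mask; insn->data selects the unsigned (logical) comparison.  */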
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

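/* Conversions between floating point and integer formats.  The m3 field
   is passed through to the helper (it encodes the rounding mode for
   these conversions); for the to-integer direction the condition code
   is derived from the floating-point source operand.  */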
1663static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1664{
1665    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1666    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1667    tcg_temp_free_i32(m3);
1668    gen_set_cc_nz_f32(s, o->in2);
1669    return NO_EXIT;
1670}
1671
1672static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1673{
1674    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1675    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1676    tcg_temp_free_i32(m3);
1677    gen_set_cc_nz_f64(s, o->in2);
1678    return NO_EXIT;
1679}
1680
1681static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1682{
1683    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1684    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1685    tcg_temp_free_i32(m3);
1686    gen_set_cc_nz_f128(s, o->in1, o->in2);
1687    return NO_EXIT;
1688}
1689
1690static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1691{
1692    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1693    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1694    tcg_temp_free_i32(m3);
1695    gen_set_cc_nz_f32(s, o->in2);
1696    return NO_EXIT;
1697}
1698
1699static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1700{
1701    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1702    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1703    tcg_temp_free_i32(m3);
1704    gen_set_cc_nz_f64(s, o->in2);
1705    return NO_EXIT;
1706}
1707
1708static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1709{
1710    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1711    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1712    tcg_temp_free_i32(m3);
1713    gen_set_cc_nz_f128(s, o->in1, o->in2);
1714    return NO_EXIT;
1715}
1716
1717static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1718{
1719    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1720    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1721    tcg_temp_free_i32(m3);
1722    gen_set_cc_nz_f32(s, o->in2);
1723    return NO_EXIT;
1724}
1725
1726static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1727{
1728    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1729    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1730    tcg_temp_free_i32(m3);
1731    gen_set_cc_nz_f64(s, o->in2);
1732    return NO_EXIT;
1733}
1734
1735static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1736{
1737    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1738    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1739    tcg_temp_free_i32(m3);
1740    gen_set_cc_nz_f128(s, o->in1, o->in2);
1741    return NO_EXIT;
1742}
1743
1744static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1745{
1746    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1747    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1748    tcg_temp_free_i32(m3);
1749    gen_set_cc_nz_f32(s, o->in2);
1750    return NO_EXIT;
1751}
1752
1753static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1754{
1755    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1756    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1757    tcg_temp_free_i32(m3);
1758    gen_set_cc_nz_f64(s, o->in2);
1759    return NO_EXIT;
1760}
1761
1762static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1763{
1764    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1765    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1766    tcg_temp_free_i32(m3);
1767    gen_set_cc_nz_f128(s, o->in1, o->in2);
1768    return NO_EXIT;
1769}
1770
1771static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1772{
1773    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1774    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1775    tcg_temp_free_i32(m3);
1776    return NO_EXIT;
1777}
1778
1779static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1780{
1781    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1782    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1783    tcg_temp_free_i32(m3);
1784    return NO_EXIT;
1785}
1786
1787static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1788{
1789    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1790    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1791    tcg_temp_free_i32(m3);
1792    return_low128(o->out2);
1793    return NO_EXIT;
1794}
1795
1796static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1797{
1798    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1799    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1800    tcg_temp_free_i32(m3);
1801    return NO_EXIT;
1802}
1803
1804static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1805{
1806    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1807    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1808    tcg_temp_free_i32(m3);
1809    return NO_EXIT;
1810}
1811
1812static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1813{
1814    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1815    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1816    tcg_temp_free_i32(m3);
1817    return_low128(o->out2);
1818    return NO_EXIT;
1819}
1820
1821static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1822{
1823    int r2 = get_field(s->fields, r2);
1824    TCGv_i64 len = tcg_temp_new_i64();
1825
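    /* The helper returns the number of bytes processed in LEN; the
       accumulated checksum comes back through return_low128() into o->out.
       The second-operand address/length pair in R2/R2+1 is updated below.  */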
1826    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1827    set_cc_static(s);
1828    return_low128(o->out);
1829
1830    tcg_gen_add_i64(regs[r2], regs[r2], len);
1831    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1832    tcg_temp_free_i64(len);
1833
1834    return NO_EXIT;
1835}
1836
1837static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1838{
1839    int l = get_field(s->fields, l1);
1840    TCGv_i32 vl;
1841
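    /* For operand lengths of 1, 2, 4 or 8 bytes, inline the two loads and
       let CC_OP_LTUGTU_64 compute the condition code; any other length
       falls back to the byte-wise helper.  */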
1842    switch (l + 1) {
1843    case 1:
1844        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1845        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1846        break;
1847    case 2:
1848        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1849        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1850        break;
1851    case 4:
1852        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1853        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1854        break;
1855    case 8:
1856        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1857        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1858        break;
1859    default:
1860        vl = tcg_const_i32(l);
1861        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1862        tcg_temp_free_i32(vl);
1863        set_cc_static(s);
1864        return NO_EXIT;
1865    }
1866    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1867    return NO_EXIT;
1868}
1869
1870static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
1871{
1872    int r1 = get_field(s->fields, r1);
1873    int r2 = get_field(s->fields, r2);
1874    TCGv_i32 t1, t2;
1875
1876    /* r1 and r2 must be even.  */
1877    if (r1 & 1 || r2 & 1) {
1878        gen_program_exception(s, PGM_SPECIFICATION);
1879        return EXIT_NORETURN;
1880    }
1881
1882    t1 = tcg_const_i32(r1);
1883    t2 = tcg_const_i32(r2);
1884    gen_helper_clcl(cc_op, cpu_env, t1, t2);
1885    tcg_temp_free_i32(t1);
1886    tcg_temp_free_i32(t2);
1887    set_cc_static(s);
1888    return NO_EXIT;
1889}
1890
1891static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1892{
1893    int r1 = get_field(s->fields, r1);
1894    int r3 = get_field(s->fields, r3);
1895    TCGv_i32 t1, t3;
1896
1897    /* r1 and r3 must be even.  */
1898    if (r1 & 1 || r3 & 1) {
1899        gen_program_exception(s, PGM_SPECIFICATION);
1900        return EXIT_NORETURN;
1901    }
1902
1903    t1 = tcg_const_i32(r1);
1904    t3 = tcg_const_i32(r3);
1905    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
1906    tcg_temp_free_i32(t1);
1907    tcg_temp_free_i32(t3);
1908    set_cc_static(s);
1909    return NO_EXIT;
1910}
1911
1912static ExitStatus op_clclu(DisasContext *s, DisasOps *o)
1913{
1914    int r1 = get_field(s->fields, r1);
1915    int r3 = get_field(s->fields, r3);
1916    TCGv_i32 t1, t3;
1917
1918    /* r1 and r3 must be even.  */
1919    if (r1 & 1 || r3 & 1) {
1920        gen_program_exception(s, PGM_SPECIFICATION);
1921        return EXIT_NORETURN;
1922    }
1923
1924    t1 = tcg_const_i32(r1);
1925    t3 = tcg_const_i32(r3);
1926    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
1927    tcg_temp_free_i32(t1);
1928    tcg_temp_free_i32(t3);
1929    set_cc_static(s);
1930    return NO_EXIT;
1931}
1932
1933static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1934{
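    /* COMPARE LOGICAL CHARACTERS UNDER MASK: the helper compares the bytes
       of R1 selected by M3 with successive bytes of the second operand and
       produces the condition code.  */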
1935    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1936    TCGv_i32 t1 = tcg_temp_new_i32();
1937    tcg_gen_extrl_i64_i32(t1, o->in1);
1938    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1939    set_cc_static(s);
1940    tcg_temp_free_i32(t1);
1941    tcg_temp_free_i32(m3);
1942    return NO_EXIT;
1943}
1944
1945static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1946{
1947    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1948    set_cc_static(s);
1949    return_low128(o->in2);
1950    return NO_EXIT;
1951}
1952
1953static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1954{
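    /* COPY SIGN: combine the sign bit of in1 with the magnitude of in2.  */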
1955    TCGv_i64 t = tcg_temp_new_i64();
1956    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1957    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1958    tcg_gen_or_i64(o->out, o->out, t);
1959    tcg_temp_free_i64(t);
1960    return NO_EXIT;
1961}
1962
1963static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1964{
1965    int d2 = get_field(s->fields, d2);
1966    int b2 = get_field(s->fields, b2);
1967    TCGv_i64 addr, cc;
1968
1969    /* Note that in1 = R3 (new value) and
1970       in2 = (zero-extended) R1 (expected value).  */
1971
1972    addr = get_address(s, 0, b2, d2);
1973    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
1974                               get_mem_index(s), s->insn->data | MO_ALIGN);
1975    tcg_temp_free_i64(addr);
1976
1977    /* Are the memory and expected values (un)equal?  Note that this setcond
1978       produces the output CC value, thus the NE sense of the test.  */
1979    cc = tcg_temp_new_i64();
1980    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1981    tcg_gen_extrl_i64_i32(cc_op, cc);
1982    tcg_temp_free_i64(cc);
1983    set_cc_static(s);
1984
1985    return NO_EXIT;
1986}
1987
1988static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1989{
1990    int r1 = get_field(s->fields, r1);
1991    int r3 = get_field(s->fields, r3);
1992    int d2 = get_field(s->fields, d2);
1993    int b2 = get_field(s->fields, b2);
1994    TCGv_i64 addr;
1995    TCGv_i32 t_r1, t_r3;
1996
1997    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
1998    addr = get_address(s, 0, b2, d2);
1999    t_r1 = tcg_const_i32(r1);
2000    t_r3 = tcg_const_i32(r3);
2001    if (tb_cflags(s->tb) & CF_PARALLEL) {
2002        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
2003    } else {
2004        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2005    }
2006    tcg_temp_free_i64(addr);
2007    tcg_temp_free_i32(t_r1);
2008    tcg_temp_free_i32(t_r3);
2009
2010    set_cc_static(s);
2011    return NO_EXIT;
2012}
2013
2014static ExitStatus op_csst(DisasContext *s, DisasOps *o)
2015{
2016    int r3 = get_field(s->fields, r3);
2017    TCGv_i32 t_r3 = tcg_const_i32(r3);
2018
2019    if (tb_cflags(s->tb) & CF_PARALLEL) {
2020        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->in1, o->in2);
2021    } else {
2022        gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
2023    }
2024    tcg_temp_free_i32(t_r3);
2025
2026    set_cc_static(s);
2027    return NO_EXIT;
2028}
2029
2030#ifndef CONFIG_USER_ONLY
2031static ExitStatus op_csp(DisasContext *s, DisasOps *o)
2032{
2033    TCGMemOp mop = s->insn->data;
2034    TCGv_i64 addr, old, cc;
2035    TCGLabel *lab = gen_new_label();
2036
2037    /* Note that in1 = R1 (zero-extended expected value),
2038       out = R1 (original reg), out2 = R1+1 (new value).  */
2039
2040    check_privileged(s);
2041    addr = tcg_temp_new_i64();
2042    old = tcg_temp_new_i64();
2043    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2044    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2045                               get_mem_index(s), mop | MO_ALIGN);
2046    tcg_temp_free_i64(addr);
2047
2048    /* Are the memory and expected values (un)equal?  */
2049    cc = tcg_temp_new_i64();
2050    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2051    tcg_gen_extrl_i64_i32(cc_op, cc);
2052
2053    /* Write back the output now, before the following branch, so that
2054       we don't need local temps.  */
2055    if ((mop & MO_SIZE) == MO_32) {
2056        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2057    } else {
2058        tcg_gen_mov_i64(o->out, old);
2059    }
2060    tcg_temp_free_i64(old);
2061
2062    /* If the comparison was equal, and the LSB of R2 was set,
2063       then we need to flush the TLB (for all cpus).  */
2064    tcg_gen_xori_i64(cc, cc, 1);
2065    tcg_gen_and_i64(cc, cc, o->in2);
2066    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2067    tcg_temp_free_i64(cc);
2068
2069    gen_helper_purge(cpu_env);
2070    gen_set_label(lab);
2071
2072    return NO_EXIT;
2073}
2074#endif
2075
2076static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2077{
2078    TCGv_i64 t1 = tcg_temp_new_i64();
2079    TCGv_i32 t2 = tcg_temp_new_i32();
2080    tcg_gen_extrl_i64_i32(t2, o->in1);
2081    gen_helper_cvd(t1, t2);
2082    tcg_temp_free_i32(t2);
2083    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2084    tcg_temp_free_i64(t1);
2085    return NO_EXIT;
2086}
2087
2088static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2089{
2090    int m3 = get_field(s->fields, m3);
2091    TCGLabel *lab = gen_new_label();
2092    TCGCond c;
2093
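    /* Compare and trap: branch around the trap when the inverted M3
       condition holds, i.e. trap exactly when the selected relation
       between the operands is true.  */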
2094    c = tcg_invert_cond(ltgt_cond[m3]);
2095    if (s->insn->data) {
2096        c = tcg_unsigned_cond(c);
2097    }
2098    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2099
2100    /* Trap.  */
2101    gen_trap(s);
2102
2103    gen_set_label(lab);
2104    return NO_EXIT;
2105}
2106
2107static ExitStatus op_cuXX(DisasContext *s, DisasOps *o)
2108{
2109    int m3 = get_field(s->fields, m3);
2110    int r1 = get_field(s->fields, r1);
2111    int r2 = get_field(s->fields, r2);
2112    TCGv_i32 tr1, tr2, chk;
2113
2114    /* R1 and R2 must both be even.  */
2115    if ((r1 | r2) & 1) {
2116        gen_program_exception(s, PGM_SPECIFICATION);
2117        return EXIT_NORETURN;
2118    }
2119    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2120        m3 = 0;
2121    }
2122
2123    tr1 = tcg_const_i32(r1);
2124    tr2 = tcg_const_i32(r2);
2125    chk = tcg_const_i32(m3);
2126
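    /* s->insn->data encodes the conversion, e.g. 12 for CU12 (UTF-8 to
       UTF-16) and 42 for CU42 (UTF-32 to UTF-16); dispatch accordingly.  */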
2127    switch (s->insn->data) {
2128    case 12:
2129        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2130        break;
2131    case 14:
2132        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2133        break;
2134    case 21:
2135        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2136        break;
2137    case 24:
2138        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2139        break;
2140    case 41:
2141        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2142        break;
2143    case 42:
2144        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2145        break;
2146    default:
2147        g_assert_not_reached();
2148    }
2149
2150    tcg_temp_free_i32(tr1);
2151    tcg_temp_free_i32(tr2);
2152    tcg_temp_free_i32(chk);
2153    set_cc_static(s);
2154    return NO_EXIT;
2155}
2156
2157#ifndef CONFIG_USER_ONLY
2158static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2159{
2160    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2161    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2162    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2163
2164    check_privileged(s);
2165    gen_helper_diag(cpu_env, r1, r3, func_code);
2166
2167    tcg_temp_free_i32(func_code);
2168    tcg_temp_free_i32(r3);
2169    tcg_temp_free_i32(r1);
2170    return NO_EXIT;
2171}
2172#endif
2173
2174static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2175{
2176    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2177    return_low128(o->out);
2178    return NO_EXIT;
2179}
2180
2181static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2182{
2183    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2184    return_low128(o->out);
2185    return NO_EXIT;
2186}
2187
2188static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2189{
2190    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2191    return_low128(o->out);
2192    return NO_EXIT;
2193}
2194
2195static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2196{
2197    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2198    return_low128(o->out);
2199    return NO_EXIT;
2200}
2201
2202static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2203{
2204    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2205    return NO_EXIT;
2206}
2207
2208static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2209{
2210    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2211    return NO_EXIT;
2212}
2213
2214static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2215{
2216    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2217    return_low128(o->out2);
2218    return NO_EXIT;
2219}
2220
2221static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2222{
2223    int r2 = get_field(s->fields, r2);
2224    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2225    return NO_EXIT;
2226}
2227
2228static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
2229{
2230    /* No cache information provided.  */
2231    tcg_gen_movi_i64(o->out, -1);
2232    return NO_EXIT;
2233}
2234
2235static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2236{
2237    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2238    return NO_EXIT;
2239}
2240
2241static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
2242{
2243    int r1 = get_field(s->fields, r1);
2244    int r2 = get_field(s->fields, r2);
2245    TCGv_i64 t = tcg_temp_new_i64();
2246
2247    /* Note the "subsequently" in the PoO, which implies a defined result
2248       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2249    tcg_gen_shri_i64(t, psw_mask, 32);
2250    store_reg32_i64(r1, t);
2251    if (r2 != 0) {
2252        store_reg32_i64(r2, psw_mask);
2253    }
2254
2255    tcg_temp_free_i64(t);
2256    return NO_EXIT;
2257}
2258
2259static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2260{
2261    int r1 = get_field(s->fields, r1);
2262    TCGv_i32 ilen;
2263    TCGv_i64 v1;
2264
2265    /* Nested EXECUTE is not allowed.  */
2266    if (unlikely(s->ex_value)) {
2267        gen_program_exception(s, PGM_EXECUTE);
2268        return EXIT_NORETURN;
2269    }
2270
2271    update_psw_addr(s);
2272    update_cc_op(s);
2273
2274    if (r1 == 0) {
2275        v1 = tcg_const_i64(0);
2276    } else {
2277        v1 = regs[r1];
2278    }
2279
2280    ilen = tcg_const_i32(s->ilen);
2281    gen_helper_ex(cpu_env, ilen, v1, o->in2);
2282    tcg_temp_free_i32(ilen);
2283
2284    if (r1 == 0) {
2285        tcg_temp_free_i64(v1);
2286    }
2287
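    /* The helper has stored the (possibly modified) target instruction in
       env->ex_value; end the TB so that it is translated and executed next.  */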
2288    return EXIT_PC_CC_UPDATED;
2289}
2290
2291static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
2292{
2293    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2294    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2295    tcg_temp_free_i32(m3);
2296    return NO_EXIT;
2297}
2298
2299static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
2300{
2301    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2302    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2303    tcg_temp_free_i32(m3);
2304    return NO_EXIT;
2305}
2306
2307static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
2308{
2309    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2310    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2311    return_low128(o->out2);
2312    tcg_temp_free_i32(m3);
2313    return NO_EXIT;
2314}
2315
2316static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2317{
2318    /* We'll use the original input for cc computation, since we get to
2319       compare that against 0, which ought to be better than comparing
2320       the real output against 64.  It also lets cc_dst be a convenient
2321       temporary during our computation.  */
2322    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2323
2324    /* R1 = IN ? CLZ(IN) : 64.  */
2325    tcg_gen_clzi_i64(o->out, o->in2, 64);
2326
2327    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2328       value by 64, which is undefined.  But since the shift is 64 iff the
2329       input is zero, we still get the correct result after and'ing.  */
2330    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2331    tcg_gen_shr_i64(o->out2, o->out2, o->out);
2332    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2333    return NO_EXIT;
2334}
2335
2336static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2337{
2338    int m3 = get_field(s->fields, m3);
2339    int pos, len, base = s->insn->data;
2340    TCGv_i64 tmp = tcg_temp_new_i64();
2341    uint64_t ccm;
2342
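    /* Masks selecting a full word, two adjacent bytes, or a single byte map
       onto one 32/16/8-bit load plus a deposit; any other mask is processed
       one byte at a time below.  */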
2343    switch (m3) {
2344    case 0xf:
2345        /* Effectively a 32-bit load.  */
2346        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2347        len = 32;
2348        goto one_insert;
2349
2350    case 0xc:
2351    case 0x6:
2352    case 0x3:
2353        /* Effectively a 16-bit load.  */
2354        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2355        len = 16;
2356        goto one_insert;
2357
2358    case 0x8:
2359    case 0x4:
2360    case 0x2:
2361    case 0x1:
2362        /* Effectively an 8-bit load.  */
2363        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2364        len = 8;
2365        goto one_insert;
2366
2367    one_insert:
2368        pos = base + ctz32(m3) * 8;
2369        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2370        ccm = ((1ull << len) - 1) << pos;
2371        break;
2372
2373    default:
2374        /* This is going to be a sequence of loads and inserts.  */
2375        pos = base + 32 - 8;
2376        ccm = 0;
2377        while (m3) {
2378            if (m3 & 0x8) {
2379                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2380                tcg_gen_addi_i64(o->in2, o->in2, 1);
2381                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2382                ccm |= 0xffull << pos;
2383            }
2384            m3 = (m3 << 1) & 0xf;
2385            pos -= 8;
2386        }
2387        break;
2388    }
2389
2390    tcg_gen_movi_i64(tmp, ccm);
2391    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2392    tcg_temp_free_i64(tmp);
2393    return NO_EXIT;
2394}
2395
2396static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2397{
2398    int shift = s->insn->data & 0xff;
2399    int size = s->insn->data >> 8;
2400    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2401    return NO_EXIT;
2402}
2403
2404static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2405{
2406    TCGv_i64 t1;
2407
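    /* IPM: clear the condition-code/program-mask byte of R1 (bits 32-39)
       and refill it from the PSW program mask and the computed CC.  */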
2408    gen_op_calc_cc(s);
2409    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2410
2411    t1 = tcg_temp_new_i64();
2412    tcg_gen_shli_i64(t1, psw_mask, 20);
2413    tcg_gen_shri_i64(t1, t1, 36);
2414    tcg_gen_or_i64(o->out, o->out, t1);
2415
2416    tcg_gen_extu_i32_i64(t1, cc_op);
2417    tcg_gen_shli_i64(t1, t1, 28);
2418    tcg_gen_or_i64(o->out, o->out, t1);
2419    tcg_temp_free_i64(t1);
2420    return NO_EXIT;
2421}
2422
2423#ifndef CONFIG_USER_ONLY
2424static ExitStatus op_idte(DisasContext *s, DisasOps *o)
2425{
2426    TCGv_i32 m4;
2427
2428    check_privileged(s);
2429    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2430        m4 = tcg_const_i32(get_field(s->fields, m4));
2431    } else {
2432        m4 = tcg_const_i32(0);
2433    }
2434    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2435    tcg_temp_free_i32(m4);
2436    return NO_EXIT;
2437}
2438
2439static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2440{
2441    TCGv_i32 m4;
2442
2443    check_privileged(s);
2444    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2445        m4 = tcg_const_i32(get_field(s->fields, m4));
2446    } else {
2447        m4 = tcg_const_i32(0);
2448    }
2449    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2450    tcg_temp_free_i32(m4);
2451    return NO_EXIT;
2452}
2453
2454static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2455{
2456    check_privileged(s);
2457    gen_helper_iske(o->out, cpu_env, o->in2);
2458    return NO_EXIT;
2459}
2460#endif
2461
2462static ExitStatus op_msa(DisasContext *s, DisasOps *o)
2463{
2464    int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
2465    int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
2466    int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
2467    TCGv_i32 t_r1, t_r2, t_r3, type;
2468
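    /* Validate the even/nonzero register constraints for the given
       message-security-assist function type; the cases fall through from
       the most to the least restrictive.  */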
2469    switch (s->insn->data) {
2470    case S390_FEAT_TYPE_KMCTR:
2471        if (r3 & 1 || !r3) {
2472            gen_program_exception(s, PGM_SPECIFICATION);
2473            return EXIT_NORETURN;
2474        }
2475        /* FALL THROUGH */
2476    case S390_FEAT_TYPE_PPNO:
2477    case S390_FEAT_TYPE_KMF:
2478    case S390_FEAT_TYPE_KMC:
2479    case S390_FEAT_TYPE_KMO:
2480    case S390_FEAT_TYPE_KM:
2481        if (r1 & 1 || !r1) {
2482            gen_program_exception(s, PGM_SPECIFICATION);
2483            return EXIT_NORETURN;
2484        }
2485        /* FALL THROUGH */
2486    case S390_FEAT_TYPE_KMAC:
2487    case S390_FEAT_TYPE_KIMD:
2488    case S390_FEAT_TYPE_KLMD:
2489        if (r2 & 1 || !r2) {
2490            gen_program_exception(s, PGM_SPECIFICATION);
2491            return EXIT_NORETURN;
2492        }
2493        /* FALL THROUGH */
2494    case S390_FEAT_TYPE_PCKMO:
2495    case S390_FEAT_TYPE_PCC:
2496        break;
2497    default:
2498        g_assert_not_reached();
2499    }
2500
2501    t_r1 = tcg_const_i32(r1);
2502    t_r2 = tcg_const_i32(r2);
2503    t_r3 = tcg_const_i32(r3);
2504    type = tcg_const_i32(s->insn->data);
2505    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2506    set_cc_static(s);
2507    tcg_temp_free_i32(t_r1);
2508    tcg_temp_free_i32(t_r2);
2509    tcg_temp_free_i32(t_r3);
2510    tcg_temp_free_i32(type);
2511    return NO_EXIT;
2512}
2513
2514static ExitStatus op_keb(DisasContext *s, DisasOps *o)
2515{
2516    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2517    set_cc_static(s);
2518    return NO_EXIT;
2519}
2520
2521static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
2522{
2523    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2524    set_cc_static(s);
2525    return NO_EXIT;
2526}
2527
2528static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
2529{
2530    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2531    set_cc_static(s);
2532    return NO_EXIT;
2533}
2534
2535static ExitStatus op_laa(DisasContext *s, DisasOps *o)
2536{
2537    /* The real output is indeed the original value in memory,
2538       which the atomic operation leaves in in2.  */
2539    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2540                                 s->insn->data | MO_ALIGN);
2541    /* However, we need to recompute the addition for setting CC.  */
2542    tcg_gen_add_i64(o->out, o->in1, o->in2);
2543    return NO_EXIT;
2544}
2545
2546static ExitStatus op_lan(DisasContext *s, DisasOps *o)
2547{
2548    /* The real output is indeed the original value in memory,
2549       which the atomic operation leaves in in2.  */
2550    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2551                                 s->insn->data | MO_ALIGN);
2552    /* However, we need to recompute the operation for setting CC.  */
2553    tcg_gen_and_i64(o->out, o->in1, o->in2);
2554    return NO_EXIT;
2555}
2556
2557static ExitStatus op_lao(DisasContext *s, DisasOps *o)
2558{
2559    /* The real output is indeed the original value in memory,
2560       which the atomic operation leaves in in2.  */
2561    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2562                                s->insn->data | MO_ALIGN);
2563    /* However, we need to recompute the operation for setting CC.  */
2564    tcg_gen_or_i64(o->out, o->in1, o->in2);
2565    return NO_EXIT;
2566}
2567
2568static ExitStatus op_lax(DisasContext *s, DisasOps *o)
2569{
2570    /* The real output is indeed the original value in memory,
2571       which the atomic operation leaves in in2.  */
2572    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2573                                 s->insn->data | MO_ALIGN);
2574    /* However, we need to recompute the operation for setting CC.  */
2575    tcg_gen_xor_i64(o->out, o->in1, o->in2);
2576    return NO_EXIT;
2577}
2578
2579static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2580{
2581    gen_helper_ldeb(o->out, cpu_env, o->in2);
2582    return NO_EXIT;
2583}
2584
2585static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2586{
2587    gen_helper_ledb(o->out, cpu_env, o->in2);
2588    return NO_EXIT;
2589}
2590
2591static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2592{
2593    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2594    return NO_EXIT;
2595}
2596
2597static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2598{
2599    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2600    return NO_EXIT;
2601}
2602
2603static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2604{
2605    gen_helper_lxdb(o->out, cpu_env, o->in2);
2606    return_low128(o->out2);
2607    return NO_EXIT;
2608}
2609
2610static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2611{
2612    gen_helper_lxeb(o->out, cpu_env, o->in2);
2613    return_low128(o->out2);
2614    return NO_EXIT;
2615}
2616
2617static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2618{
2619    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2620    return NO_EXIT;
2621}
2622
2623static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2624{
2625    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2626    return NO_EXIT;
2627}
2628
2629static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2630{
2631    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2632    return NO_EXIT;
2633}
2634
2635static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2636{
2637    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2638    return NO_EXIT;
2639}
2640
2641static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2642{
2643    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2644    return NO_EXIT;
2645}
2646
2647static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2648{
2649    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2650    return NO_EXIT;
2651}
2652
2653static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2654{
2655    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2656    return NO_EXIT;
2657}
2658
2659static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2660{
2661    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2662    return NO_EXIT;
2663}
2664
2665static ExitStatus op_lat(DisasContext *s, DisasOps *o)
2666{
2667    TCGLabel *lab = gen_new_label();
2668    store_reg32_i64(get_field(s->fields, r1), o->in2);
2669    /* The value is stored even in case of trap. */
2670    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2671    gen_trap(s);
2672    gen_set_label(lab);
2673    return NO_EXIT;
2674}
2675
2676static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2677{
2678    TCGLabel *lab = gen_new_label();
2679    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2680    /* The value is stored even in case of trap. */
2681    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2682    gen_trap(s);
2683    gen_set_label(lab);
2684    return NO_EXIT;
2685}
2686
2687static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2688{
2689    TCGLabel *lab = gen_new_label();
2690    store_reg32h_i64(get_field(s->fields, r1), o->in2);
2691    /* The value is stored even in case of trap. */
2692    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2693    gen_trap(s);
2694    gen_set_label(lab);
2695    return NO_EXIT;
2696}
2697
2698static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2699{
2700    TCGLabel *lab = gen_new_label();
2701    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2702    /* The value is stored even in case of trap. */
2703    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2704    gen_trap(s);
2705    gen_set_label(lab);
2706    return NO_EXIT;
2707}
2708
2709static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2710{
2711    TCGLabel *lab = gen_new_label();
2712    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2713    /* The value is stored even in case of trap. */
2714    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2715    gen_trap(s);
2716    gen_set_label(lab);
2717    return NO_EXIT;
2718}
2719
2720static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2721{
2722    DisasCompare c;
2723
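    /* LOAD ON CONDITION: evaluate the M3 condition and select between the
       new value (in2) and the original register (in1) with a movcond, so
       no branch is generated.  */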
2724    disas_jcc(s, &c, get_field(s->fields, m3));
2725
2726    if (c.is_64) {
2727        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2728                            o->in2, o->in1);
2729        free_compare(&c);
2730    } else {
2731        TCGv_i32 t32 = tcg_temp_new_i32();
2732        TCGv_i64 t, z;
2733
2734        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2735        free_compare(&c);
2736
2737        t = tcg_temp_new_i64();
2738        tcg_gen_extu_i32_i64(t, t32);
2739        tcg_temp_free_i32(t32);
2740
2741        z = tcg_const_i64(0);
2742        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2743        tcg_temp_free_i64(t);
2744        tcg_temp_free_i64(z);
2745    }
2746
2747    return NO_EXIT;
2748}
2749
2750#ifndef CONFIG_USER_ONLY
2751static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2752{
2753    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2754    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2755    check_privileged(s);
2756    gen_helper_lctl(cpu_env, r1, o->in2, r3);
2757    tcg_temp_free_i32(r1);
2758    tcg_temp_free_i32(r3);
2759    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2760    return EXIT_PC_STALE_NOCHAIN;
2761}
2762
2763static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2764{
2765    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2766    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2767    check_privileged(s);
2768    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2769    tcg_temp_free_i32(r1);
2770    tcg_temp_free_i32(r3);
2771    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2772    return EXIT_PC_STALE_NOCHAIN;
2773}
2774
2775static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2776{
2777    check_privileged(s);
2778    gen_helper_lra(o->out, cpu_env, o->in2);
2779    set_cc_static(s);
2780    return NO_EXIT;
2781}
2782
2783static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
2784{
2785    check_privileged(s);
2786
2787    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2788    return NO_EXIT;
2789}
2790
2791static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2792{
2793    TCGv_i64 t1, t2;
2794
2795    check_privileged(s);
2796    per_breaking_event(s);
2797
2798    t1 = tcg_temp_new_i64();
2799    t2 = tcg_temp_new_i64();
2800    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2801    tcg_gen_addi_i64(o->in2, o->in2, 4);
2802    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2803    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
2804    tcg_gen_shli_i64(t1, t1, 32);
2805    gen_helper_load_psw(cpu_env, t1, t2);
2806    tcg_temp_free_i64(t1);
2807    tcg_temp_free_i64(t2);
2808    return EXIT_NORETURN;
2809}
2810
2811static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2812{
2813    TCGv_i64 t1, t2;
2814
2815    check_privileged(s);
2816    per_breaking_event(s);
2817
2818    t1 = tcg_temp_new_i64();
2819    t2 = tcg_temp_new_i64();
2820    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2821    tcg_gen_addi_i64(o->in2, o->in2, 8);
2822    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2823    gen_helper_load_psw(cpu_env, t1, t2);
2824    tcg_temp_free_i64(t1);
2825    tcg_temp_free_i64(t2);
2826    return EXIT_NORETURN;
2827}
2828#endif
2829
2830static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2831{
2832    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2833    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2834    gen_helper_lam(cpu_env, r1, o->in2, r3);
2835    tcg_temp_free_i32(r1);
2836    tcg_temp_free_i32(r3);
2837    return NO_EXIT;
2838}
2839
2840static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2841{
2842    int r1 = get_field(s->fields, r1);
2843    int r3 = get_field(s->fields, r3);
2844    TCGv_i64 t1, t2;
2845
2846    /* Only one register to read. */
2847    t1 = tcg_temp_new_i64();
2848    if (unlikely(r1 == r3)) {
2849        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2850        store_reg32_i64(r1, t1);
2851        tcg_temp_free(t1);
2852        return NO_EXIT;
2853    }
2854
2855    /* First load the values of the first and last registers to trigger
2856       possible page faults. */
2857    t2 = tcg_temp_new_i64();
2858    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2859    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2860    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2861    store_reg32_i64(r1, t1);
2862    store_reg32_i64(r3, t2);
2863
2864    /* Only two registers to read. */
2865    if (((r1 + 1) & 15) == r3) {
2866        tcg_temp_free(t2);
2867        tcg_temp_free(t1);
2868        return NO_EXIT;
2869    }
2870
2871    /* Then load the remaining registers. Page fault can't occur. */
2872    r3 = (r3 - 1) & 15;
2873    tcg_gen_movi_i64(t2, 4);
2874    while (r1 != r3) {
2875        r1 = (r1 + 1) & 15;
2876        tcg_gen_add_i64(o->in2, o->in2, t2);
2877        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2878        store_reg32_i64(r1, t1);
2879    }
2880    tcg_temp_free(t2);
2881    tcg_temp_free(t1);
2882
2883    return NO_EXIT;
2884}
2885
2886static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2887{
2888    int r1 = get_field(s->fields, r1);
2889    int r3 = get_field(s->fields, r3);
2890    TCGv_i64 t1, t2;
2891
2892    /* Only one register to read. */
2893    t1 = tcg_temp_new_i64();
2894    if (unlikely(r1 == r3)) {
2895        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2896        store_reg32h_i64(r1, t1);
2897        tcg_temp_free(t1);
2898        return NO_EXIT;
2899    }
2900
2901    /* First load the values of the first and last registers to trigger
2902       possible page faults. */
2903    t2 = tcg_temp_new_i64();
2904    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2905    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2906    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2907    store_reg32h_i64(r1, t1);
2908    store_reg32h_i64(r3, t2);
2909
2910    /* Only two registers to read. */
2911    if (((r1 + 1) & 15) == r3) {
2912        tcg_temp_free(t2);
2913        tcg_temp_free(t1);
2914        return NO_EXIT;
2915    }
2916
2917    /* Then load the remaining registers. Page fault can't occur. */
2918    r3 = (r3 - 1) & 15;
2919    tcg_gen_movi_i64(t2, 4);
2920    while (r1 != r3) {
2921        r1 = (r1 + 1) & 15;
2922        tcg_gen_add_i64(o->in2, o->in2, t2);
2923        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2924        store_reg32h_i64(r1, t1);
2925    }
2926    tcg_temp_free(t2);
2927    tcg_temp_free(t1);
2928
2929    return NO_EXIT;
2930}
2931
2932static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2933{
2934    int r1 = get_field(s->fields, r1);
2935    int r3 = get_field(s->fields, r3);
2936    TCGv_i64 t1, t2;
2937
2938    /* Only one register to read. */
2939    if (unlikely(r1 == r3)) {
2940        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2941        return NO_EXIT;
2942    }
2943
2944    /* First load the values of the first and last registers to trigger
2945       possible page faults. */
2946    t1 = tcg_temp_new_i64();
2947    t2 = tcg_temp_new_i64();
2948    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2949    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2950    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2951    tcg_gen_mov_i64(regs[r1], t1);
2952    tcg_temp_free(t2);
2953
2954    /* Only two registers to read. */
2955    if (((r1 + 1) & 15) == r3) {
2956        tcg_temp_free(t1);
2957        return NO_EXIT;
2958    }
2959
2960    /* Then load the remaining registers. Page fault can't occur. */
2961    r3 = (r3 - 1) & 15;
2962    tcg_gen_movi_i64(t1, 8);
2963    while (r1 != r3) {
2964        r1 = (r1 + 1) & 15;
2965        tcg_gen_add_i64(o->in2, o->in2, t1);
2966        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2967    }
2968    tcg_temp_free(t1);
2969
2970    return NO_EXIT;
2971}
2972
2973static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
2974{
2975    TCGv_i64 a1, a2;
2976    TCGMemOp mop = s->insn->data;
2977
2978    /* In a parallel context, stop the world and single step.  */
2979    if (tb_cflags(s->tb) & CF_PARALLEL) {
2980        update_psw_addr(s);
2981        update_cc_op(s);
2982        gen_exception(EXCP_ATOMIC);
2983        return EXIT_NORETURN;
2984    }
2985
2986    /* In a serial context, perform the two loads ... */
2987    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
2988    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
2989    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
2990    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
2991    tcg_temp_free_i64(a1);
2992    tcg_temp_free_i64(a2);
2993
2994    /* ... and indicate that we performed them while interlocked.  */
2995    gen_op_movi_cc(s, 0);
2996    return NO_EXIT;
2997}
2998
2999static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
3000{
3001    if (tb_cflags(s->tb) & CF_PARALLEL) {
3002        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3003    } else {
3004        gen_helper_lpq(o->out, cpu_env, o->in2);
3005    }
3006    return_low128(o->out2);
3007    return NO_EXIT;
3008}
3009
3010#ifndef CONFIG_USER_ONLY
3011static ExitStatus op_lura(DisasContext *s, DisasOps *o)
3012{
3013    check_privileged(s);
3014    gen_helper_lura(o->out, cpu_env, o->in2);
3015    return NO_EXIT;
3016}
3017
3018static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
3019{
3020    check_privileged(s);
3021    gen_helper_lurag(o->out, cpu_env, o->in2);
3022    return NO_EXIT;
3023}
3024#endif
3025
3026static ExitStatus op_lzrb(DisasContext *s, DisasOps *o)
3027{
3028    tcg_gen_andi_i64(o->out, o->in2, -256);
3029    return NO_EXIT;
3030}
3031
3032static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
3033{
3034    o->out = o->in2;
3035    o->g_out = o->g_in2;
3036    o->in2 = NULL;
3037    o->g_in2 = false;
3038    return NO_EXIT;
3039}
3040
3041static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
3042{
3043    int b2 = get_field(s->fields, b2);
3044    TCGv_i64 ar1 = tcg_temp_new_i64();
3045
3046    o->out = o->in2;
3047    o->g_out = o->g_in2;
3048    o->in2 = NULL;
3049    o->g_in2 = false;
3050
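    /* Select the value to place in access register 1 according to the
       current address-space control in the PSW.  */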
3051    switch (s->tb->flags & FLAG_MASK_ASC) {
3052    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3053        tcg_gen_movi_i64(ar1, 0);
3054        break;
3055    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3056        tcg_gen_movi_i64(ar1, 1);
3057        break;
3058    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3059        if (b2) {
3060            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3061        } else {
3062            tcg_gen_movi_i64(ar1, 0);
3063        }
3064        break;
3065    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3066        tcg_gen_movi_i64(ar1, 2);
3067        break;
3068    }
3069
3070    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3071    tcg_temp_free_i64(ar1);
3072
3073    return NO_EXIT;
3074}
3075
3076static ExitStatus op_movx(DisasContext *s, DisasOps *o)
3077{
3078    o->out = o->in1;
3079    o->out2 = o->in2;
3080    o->g_out = o->g_in1;
3081    o->g_out2 = o->g_in2;
3082    o->in1 = NULL;
3083    o->in2 = NULL;
3084    o->g_in1 = o->g_in2 = false;
3085    return NO_EXIT;
3086}
3087
3088static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
3089{
3090    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3091    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3092    tcg_temp_free_i32(l);
3093    return NO_EXIT;
3094}
3095
3096static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
3097{
3098    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3099    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3100    tcg_temp_free_i32(l);
3101    return NO_EXIT;
3102}
3103
3104static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
3105{
3106    int r1 = get_field(s->fields, r1);
3107    int r2 = get_field(s->fields, r2);
3108    TCGv_i32 t1, t2;
3109
3110    /* r1 and r2 must be even.  */
3111    if (r1 & 1 || r2 & 1) {
3112        gen_program_exception(s, PGM_SPECIFICATION);
3113        return EXIT_NORETURN;
3114    }
3115
3116    t1 = tcg_const_i32(r1);
3117    t2 = tcg_const_i32(r2);
3118    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3119    tcg_temp_free_i32(t1);
3120    tcg_temp_free_i32(t2);
3121    set_cc_static(s);
3122    return NO_EXIT;
3123}
3124
3125static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
3126{
3127    int r1 = get_field(s->fields, r1);
3128    int r3 = get_field(s->fields, r3);
3129    TCGv_i32 t1, t3;
3130
3131    /* r1 and r3 must be even.  */
3132    if (r1 & 1 || r3 & 1) {
3133        gen_program_exception(s, PGM_SPECIFICATION);
3134        return EXIT_NORETURN;
3135    }
3136
3137    t1 = tcg_const_i32(r1);
3138    t3 = tcg_const_i32(r3);
3139    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3140    tcg_temp_free_i32(t1);
3141    tcg_temp_free_i32(t3);
3142    set_cc_static(s);
3143    return NO_EXIT;
3144}
3145
3146static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
3147{
3148    int r1 = get_field(s->fields, r1);
3149    int r3 = get_field(s->fields, r3);
3150    TCGv_i32 t1, t3;
3151
3152    /* r1 and r3 must be even.  */
3153    if (r1 & 1 || r3 & 1) {
3154        gen_program_exception(s, PGM_SPECIFICATION);
3155        return EXIT_NORETURN;
3156    }
3157
3158    t1 = tcg_const_i32(r1);
3159    t3 = tcg_const_i32(r3);
3160    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3161    tcg_temp_free_i32(t1);
3162    tcg_temp_free_i32(t3);
3163    set_cc_static(s);
3164    return NO_EXIT;
3165}
3166
3167static ExitStatus op_mvcos(DisasContext *s, DisasOps *o)
3168{
3169    int r3 = get_field(s->fields, r3);
3170    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3171    set_cc_static(s);
3172    return NO_EXIT;
3173}
3174
3175#ifndef CONFIG_USER_ONLY
3176static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
3177{
3178    int r1 = get_field(s->fields, l1);
3179    check_privileged(s);
3180    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3181    set_cc_static(s);
3182    return NO_EXIT;
3183}
3184
3185static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
3186{
3187    int r1 = get_field(s->fields, l1);
3188    check_privileged(s);
3189    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3190    set_cc_static(s);
3191    return NO_EXIT;
3192}
3193#endif
3194
3195static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
3196{
3197    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3198    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3199    tcg_temp_free_i32(l);
3200    return NO_EXIT;
3201}
3202
3203static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
3204{
3205    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3206    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3207    tcg_temp_free_i32(l);
3208    return NO_EXIT;
3209}
3210
3211static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
3212{
3213    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3214    set_cc_static(s);
3215    return NO_EXIT;
3216}
3217
3218static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
3219{
3220    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3221    set_cc_static(s);
3222    return_low128(o->in2);
3223    return NO_EXIT;
3224}
3225
3226static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
3227{
3228    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3229    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3230    tcg_temp_free_i32(l);
3231    return NO_EXIT;
3232}
3233
3234static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3235{
3236    tcg_gen_mul_i64(o->out, o->in1, o->in2);
3237    return NO_EXIT;
3238}
3239
3240static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3241{
3242    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3243    return NO_EXIT;
3244}
3245
3246static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
3247{
3248    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3249    return NO_EXIT;
3250}
3251
3252static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
3253{
3254    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3255    return NO_EXIT;
3256}
3257
3258static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
3259{
3260    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3261    return NO_EXIT;
3262}
3263
3264static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
3265{
3266    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3267    return_low128(o->out2);
3268    return NO_EXIT;
3269}
3270
3271static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
3272{
3273    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3274    return_low128(o->out2);
3275    return NO_EXIT;
3276}
3277
3278static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
3279{
3280    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3281    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3282    tcg_temp_free_i64(r3);
3283    return NO_EXIT;
3284}
3285
3286static ExitStatus op_madb(DisasContext *s, DisasOps *o)
3287{
3288    int r3 = get_field(s->fields, r3);
3289    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3290    return NO_EXIT;
3291}
3292
3293static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
3294{
3295    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3296    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3297    tcg_temp_free_i64(r3);
3298    return NO_EXIT;
3299}
3300
3301static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
3302{
3303    int r3 = get_field(s->fields, r3);
3304    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3305    return NO_EXIT;
3306}
3307
3308static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3309{
3310    TCGv_i64 z, n;
3311    z = tcg_const_i64(0);
3312    n = tcg_temp_new_i64();
3313    tcg_gen_neg_i64(n, o->in2);
3314    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3315    tcg_temp_free_i64(n);
3316    tcg_temp_free_i64(z);
3317    return NO_EXIT;
3318}
3319
3320static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
3321{
3322    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3323    return NO_EXIT;
3324}
3325
3326static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
3327{
3328    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3329    return NO_EXIT;
3330}
3331
3332static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
3333{
3334    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3335    tcg_gen_mov_i64(o->out2, o->in2);
3336    return NO_EXIT;
3337}
3338
3339static ExitStatus op_nc(DisasContext *s, DisasOps *o)
3340{
3341    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3342    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3343    tcg_temp_free_i32(l);
3344    set_cc_static(s);
3345    return NO_EXIT;
3346}
3347
3348static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3349{
3350    tcg_gen_neg_i64(o->out, o->in2);
3351    return NO_EXIT;
3352}
3353
3354static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3355{
3356    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3357    return NO_EXIT;
3358}
3359
3360static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3361{
3362    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3363    return NO_EXIT;
3364}
3365
3366static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3367{
3368    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3369    tcg_gen_mov_i64(o->out2, o->in2);
3370    return NO_EXIT;
3371}
3372
3373static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3374{
3375    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3376    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3377    tcg_temp_free_i32(l);
3378    set_cc_static(s);
3379    return NO_EXIT;
3380}
3381
3382static ExitStatus op_or(DisasContext *s, DisasOps *o)
3383{
3384    tcg_gen_or_i64(o->out, o->in1, o->in2);
3385    return NO_EXIT;
3386}
3387
3388static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3389{
3390    int shift = s->insn->data & 0xff;
3391    int size = s->insn->data >> 8;
3392    uint64_t mask = ((1ull << size) - 1) << shift;
3393
3394    assert(!o->g_in2);
3395    tcg_gen_shli_i64(o->in2, o->in2, shift);
3396    tcg_gen_or_i64(o->out, o->in1, o->in2);
3397
3398    /* Produce the CC from only the bits manipulated.  */
3399    tcg_gen_andi_i64(cc_dst, o->out, mask);
3400    set_cc_nz_u64(s, cc_dst);
3401    return NO_EXIT;
3402}
3403
3404static ExitStatus op_oi(DisasContext *s, DisasOps *o)
3405{
3406    o->in1 = tcg_temp_new_i64();
3407
3408    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3409        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3410    } else {
3411        /* Perform the atomic operation in memory. */
3412        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3413                                    s->insn->data);
3414    }
3415
3416    /* Recompute also for atomic case: needed for setting CC. */
3417    tcg_gen_or_i64(o->out, o->in1, o->in2);
3418
3419    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3420        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3421    }
3422    return NO_EXIT;
3423}
3424
3425static ExitStatus op_pack(DisasContext *s, DisasOps *o)
3426{
3427    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3428    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3429    tcg_temp_free_i32(l);
3430    return NO_EXIT;
3431}
3432
3433static ExitStatus op_pka(DisasContext *s, DisasOps *o)
3434{
3435    int l2 = get_field(s->fields, l2) + 1;
3436    TCGv_i32 l;
3437
3438    /* The length must not exceed 32 bytes.  */
3439    if (l2 > 32) {
3440        gen_program_exception(s, PGM_SPECIFICATION);
3441        return EXIT_NORETURN;
3442    }
3443    l = tcg_const_i32(l2);
3444    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3445    tcg_temp_free_i32(l);
3446    return NO_EXIT;
3447}
3448
3449static ExitStatus op_pku(DisasContext *s, DisasOps *o)
3450{
3451    int l2 = get_field(s->fields, l2) + 1;
3452    TCGv_i32 l;
3453
3454    /* The length must be even and must not exceed 64 bytes.  */
3455    if ((l2 & 1) || (l2 > 64)) {
3456        gen_program_exception(s, PGM_SPECIFICATION);
3457        return EXIT_NORETURN;
3458    }
3459    l = tcg_const_i32(l2);
3460    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3461    tcg_temp_free_i32(l);
3462    return NO_EXIT;
3463}
3464
3465static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3466{
3467    gen_helper_popcnt(o->out, o->in2);
3468    return NO_EXIT;
3469}
3470
3471#ifndef CONFIG_USER_ONLY
3472static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3473{
3474    check_privileged(s);
3475    gen_helper_ptlb(cpu_env);
3476    return NO_EXIT;
3477}
3478#endif
3479
3480static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3481{
3482    int i3 = get_field(s->fields, i3);
3483    int i4 = get_field(s->fields, i4);
3484    int i5 = get_field(s->fields, i5);
3485    int do_zero = i4 & 0x80;
3486    uint64_t mask, imask, pmask;
3487    int pos, len, rot;
3488
3489    /* Adjust the arguments for the specific insn.  */
3490    switch (s->fields->op2) {
3491    case 0x55: /* risbg */
3492    case 0x59: /* risbgn */
3493        i3 &= 63;
3494        i4 &= 63;
3495        pmask = ~0;
3496        break;
3497    case 0x5d: /* risbhg */
3498        i3 &= 31;
3499        i4 &= 31;
3500        pmask = 0xffffffff00000000ull;
3501        break;
3502    case 0x51: /* risblg */
3503        i3 &= 31;
3504        i4 &= 31;
3505        pmask = 0x00000000ffffffffull;
3506        break;
3507    default:
3508        g_assert_not_reached();
3509    }
3510
3511    /* MASK is the set of bits to be inserted from R2.
3512       Take care for I3/I4 wraparound.  */
3513    mask = pmask >> i3;
3514    if (i3 <= i4) {
3515        mask ^= pmask >> i4 >> 1;
3516    } else {
3517        mask |= ~(pmask >> i4 >> 1);
3518    }
3519    mask &= pmask;
3520
3521    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3522       insns, we need to keep the other half of the register.  */
3523    imask = ~mask | ~pmask;
3524    if (do_zero) {
3525        imask = ~pmask;
3526    }
3527
3528    len = i4 - i3 + 1;
3529    pos = 63 - i4;
3530    rot = i5 & 63;
3531    if (s->fields->op2 == 0x5d) {
3532        pos += 32;
3533    }
3534
3535    /* In some cases we can implement this with extract.  */
3536    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3537        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3538        return NO_EXIT;
3539    }
3540
3541    /* In some cases we can implement this with deposit.  */
3542    if (len > 0 && (imask == 0 || ~mask == imask)) {
3543        /* Note that we rotate the bits to be inserted to the lsb, not to
3544           the position as described in the PoO.  */
3545        rot = (rot - pos) & 63;
3546    } else {
3547        pos = -1;
3548    }
3549
3550    /* Rotate the input as necessary.  */
3551    tcg_gen_rotli_i64(o->in2, o->in2, rot);
3552
3553    /* Insert the selected bits into the output.  */
3554    if (pos >= 0) {
3555        if (imask == 0) {
3556            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3557        } else {
3558            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3559        }
3560    } else if (imask == 0) {
3561        tcg_gen_andi_i64(o->out, o->in2, mask);
3562    } else {
3563        tcg_gen_andi_i64(o->in2, o->in2, mask);
3564        tcg_gen_andi_i64(o->out, o->out, imask);
3565        tcg_gen_or_i64(o->out, o->out, o->in2);
3566    }
3567    return NO_EXIT;
3568}
3569
3570static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3571{
3572    int i3 = get_field(s->fields, i3);
3573    int i4 = get_field(s->fields, i4);
3574    int i5 = get_field(s->fields, i5);
3575    uint64_t mask;
3576
3577    /* If this is a test-only form, arrange to discard the result.  */
3578    if (i3 & 0x80) {
3579        o->out = tcg_temp_new_i64();
3580        o->g_out = false;
3581    }
3582
3583    i3 &= 63;
3584    i4 &= 63;
3585    i5 &= 63;
3586
3587    /* MASK is the set of bits to be operated on from R2.
3588       Take care for I3/I4 wraparound.  */
3589    mask = ~0ull >> i3;
3590    if (i3 <= i4) {
3591        mask ^= ~0ull >> i4 >> 1;
3592    } else {
3593        mask |= ~(~0ull >> i4 >> 1);
3594    }
3595
3596    /* Rotate the input as necessary.  */
3597    tcg_gen_rotli_i64(o->in2, o->in2, i5);
3598
3599    /* Operate.  */
3600    switch (s->fields->op2) {
3601    case 0x55: /* AND */
3602        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3603        tcg_gen_and_i64(o->out, o->out, o->in2);
3604        break;
3605    case 0x56: /* OR */
3606        tcg_gen_andi_i64(o->in2, o->in2, mask);
3607        tcg_gen_or_i64(o->out, o->out, o->in2);
3608        break;
3609    case 0x57: /* XOR */
3610        tcg_gen_andi_i64(o->in2, o->in2, mask);
3611        tcg_gen_xor_i64(o->out, o->out, o->in2);
3612        break;
3613    default:
3614        abort();
3615    }
3616
3617    /* Set the CC.  */
3618    tcg_gen_andi_i64(cc_dst, o->out, mask);
3619    set_cc_nz_u64(s, cc_dst);
3620    return NO_EXIT;
3621}
3622
3623static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3624{
3625    tcg_gen_bswap16_i64(o->out, o->in2);
3626    return NO_EXIT;
3627}
3628
3629static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3630{
3631    tcg_gen_bswap32_i64(o->out, o->in2);
3632    return NO_EXIT;
3633}
3634
3635static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3636{
3637    tcg_gen_bswap64_i64(o->out, o->in2);
3638    return NO_EXIT;
3639}
3640
3641static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3642{
3643    TCGv_i32 t1 = tcg_temp_new_i32();
3644    TCGv_i32 t2 = tcg_temp_new_i32();
3645    TCGv_i32 to = tcg_temp_new_i32();
3646    tcg_gen_extrl_i64_i32(t1, o->in1);
3647    tcg_gen_extrl_i64_i32(t2, o->in2);
3648    tcg_gen_rotl_i32(to, t1, t2);
3649    tcg_gen_extu_i32_i64(o->out, to);
3650    tcg_temp_free_i32(t1);
3651    tcg_temp_free_i32(t2);
3652    tcg_temp_free_i32(to);
3653    return NO_EXIT;
3654}
3655
3656static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3657{
3658    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3659    return NO_EXIT;
3660}
3661
3662#ifndef CONFIG_USER_ONLY
3663static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3664{
3665    check_privileged(s);
3666    gen_helper_rrbe(cc_op, cpu_env, o->in2);
3667    set_cc_static(s);
3668    return NO_EXIT;
3669}
3670
3671static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3672{
3673    check_privileged(s);
3674    gen_helper_sacf(cpu_env, o->in2);
3675    /* Addressing mode has changed, so end the block.  */
3676    return EXIT_PC_STALE;
3677}
3678#endif
3679
3680static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3681{
3682    int sam = s->insn->data;
3683    TCGv_i64 tsam;
3684    uint64_t mask;
3685
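    /* MASK is the largest instruction address representable in the new
       addressing mode; SAM itself is the two-bit value deposited into
       the PSW addressing-mode bits below.  */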
3686    switch (sam) {
3687    case 0:
3688        mask = 0xffffff;
3689        break;
3690    case 1:
3691        mask = 0x7fffffff;
3692        break;
3693    default:
3694        mask = -1;
3695        break;
3696    }
3697
3698    /* Bizarre but true, we check the address of the current insn for the
3699       specification exception, not the next to be executed.  Thus the PoO
3700       documents that Bad Things Happen two bytes before the end.  */
3701    if (s->pc & ~mask) {
3702        gen_program_exception(s, PGM_SPECIFICATION);
3703        return EXIT_NORETURN;
3704    }
3705    s->next_pc &= mask;
3706
3707    tsam = tcg_const_i64(sam);
3708    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3709    tcg_temp_free_i64(tsam);
3710
3711    /* Always exit the TB, since we (may have) changed execution mode.  */
3712    return EXIT_PC_STALE;
3713}
3714
3715static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3716{
3717    int r1 = get_field(s->fields, r1);
3718    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3719    return NO_EXIT;
3720}
3721
3722static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3723{
3724    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3725    return NO_EXIT;
3726}
3727
3728static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3729{
3730    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3731    return NO_EXIT;
3732}
3733
3734static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3735{
3736    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3737    return_low128(o->out2);
3738    return NO_EXIT;
3739}
3740
3741static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3742{
3743    gen_helper_sqeb(o->out, cpu_env, o->in2);
3744    return NO_EXIT;
3745}
3746
3747static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3748{
3749    gen_helper_sqdb(o->out, cpu_env, o->in2);
3750    return NO_EXIT;
3751}
3752
3753static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3754{
3755    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3756    return_low128(o->out2);
3757    return NO_EXIT;
3758}
3759
3760#ifndef CONFIG_USER_ONLY
3761static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3762{
3763    check_privileged(s);
3764    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3765    set_cc_static(s);
3766    return NO_EXIT;
3767}
3768
3769static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3770{
3771    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3772    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3773    check_privileged(s);
3774    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
3775    set_cc_static(s);
3776    tcg_temp_free_i32(r1);
3777    tcg_temp_free_i32(r3);
3778    return NO_EXIT;
3779}
3780#endif
3781
3782static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3783{
3784    DisasCompare c;
3785    TCGv_i64 a, h;
3786    TCGLabel *lab;
3787    int r1;
3788
3789    disas_jcc(s, &c, get_field(s->fields, m3));
3790
3791    /* We want to store when the condition is fulfilled, so branch
3792       out when it's not.  */
3793    c.cond = tcg_invert_cond(c.cond);
3794
3795    lab = gen_new_label();
3796    if (c.is_64) {
3797        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3798    } else {
3799        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3800    }
3801    free_compare(&c);
3802
3803    r1 = get_field(s->fields, r1);
3804    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3805    switch (s->insn->data) {
3806    case 1: /* STOCG */
3807        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3808        break;
3809    case 0: /* STOC */
3810        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3811        break;
3812    case 2: /* STOCFH */
3813        h = tcg_temp_new_i64();
3814        tcg_gen_shri_i64(h, regs[r1], 32);
3815        tcg_gen_qemu_st32(h, a, get_mem_index(s));
3816        tcg_temp_free_i64(h);
3817        break;
3818    default:
3819        g_assert_not_reached();
3820    }
3821    tcg_temp_free_i64(a);
3822
3823    gen_set_label(lab);
3824    return NO_EXIT;
3825}
3826
3827static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3828{
3829    uint64_t sign = 1ull << s->insn->data;
3830    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
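    /* The CC helpers need the unshifted value and the shift amount, so
       latch them before IN1 is modified below.  */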
3831    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3832    tcg_gen_shl_i64(o->out, o->in1, o->in2);
3833    /* The arithmetic left shift is curious in that it does not affect
3834       the sign bit.  Copy that over from the source unchanged.  */
3835    tcg_gen_andi_i64(o->out, o->out, ~sign);
3836    tcg_gen_andi_i64(o->in1, o->in1, sign);
3837    tcg_gen_or_i64(o->out, o->out, o->in1);
3838    return NO_EXIT;
3839}
3840
3841static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3842{
3843    tcg_gen_shl_i64(o->out, o->in1, o->in2);
3844    return NO_EXIT;
3845}
3846
3847static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3848{
3849    tcg_gen_sar_i64(o->out, o->in1, o->in2);
3850    return NO_EXIT;
3851}
3852
3853static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3854{
3855    tcg_gen_shr_i64(o->out, o->in1, o->in2);
3856    return NO_EXIT;
3857}
3858
3859static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3860{
3861    gen_helper_sfpc(cpu_env, o->in2);
3862    return NO_EXIT;
3863}
3864
3865static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3866{
3867    gen_helper_sfas(cpu_env, o->in2);
3868    return NO_EXIT;
3869}
3870
3871static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3872{
3873    int b2 = get_field(s->fields, b2);
3874    int d2 = get_field(s->fields, d2);
3875    TCGv_i64 t1 = tcg_temp_new_i64();
3876    TCGv_i64 t2 = tcg_temp_new_i64();
3877    int mask, pos, len;
3878
3879    switch (s->fields->op2) {
3880    case 0x99: /* SRNM */
3881        pos = 0, len = 2;
3882        break;
3883    case 0xb8: /* SRNMB */
3884        pos = 0, len = 3;
3885        break;
3886    case 0xb9: /* SRNMT */
3887        pos = 4, len = 3;
3888        break;
3889    default:
3890        tcg_abort();
3891    }
3892    mask = (1 << len) - 1;
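    /* POS/LEN select the rounding-mode field within the low byte of the
       FPC: the BFP rounding mode in the low bits (2 bits for SRNM, 3 for
       SRNMB), the DFP rounding mode just above it for SRNMT.  */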
3893
3894    /* Insert the value into the appropriate field of the FPC.  */
3895    if (b2 == 0) {
3896        tcg_gen_movi_i64(t1, d2 & mask);
3897    } else {
3898        tcg_gen_addi_i64(t1, regs[b2], d2);
3899        tcg_gen_andi_i64(t1, t1, mask);
3900    }
3901    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3902    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3903    tcg_temp_free_i64(t1);
3904
3905    /* Then install the new FPC to set the rounding mode in fpu_status.  */
3906    gen_helper_sfpc(cpu_env, t2);
3907    tcg_temp_free_i64(t2);
3908    return NO_EXIT;
3909}
3910
3911static ExitStatus op_spm(DisasContext *s, DisasOps *o)
3912{
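    /* SET PROGRAM MASK: bits 34-35 of the first operand become the CC,
       bits 36-39 the program mask in the PSW.  */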
3913    tcg_gen_extrl_i64_i32(cc_op, o->in1);
3914    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
3915    set_cc_static(s);
3916
3917    tcg_gen_shri_i64(o->in1, o->in1, 24);
3918    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
3919    return NO_EXIT;
3920}
3921
3922static ExitStatus op_ectg(DisasContext *s, DisasOps *o)
3923{
3924    int b1 = get_field(s->fields, b1);
3925    int d1 = get_field(s->fields, d1);
3926    int b2 = get_field(s->fields, b2);
3927    int d2 = get_field(s->fields, d2);
3928    int r3 = get_field(s->fields, r3);
3929    TCGv_i64 tmp = tcg_temp_new_i64();
3930
3931    /* fetch all operands first */
3932    o->in1 = tcg_temp_new_i64();
3933    tcg_gen_addi_i64(o->in1, regs[b1], d1);
3934    o->in2 = tcg_temp_new_i64();
3935    tcg_gen_addi_i64(o->in2, regs[b2], d2);
3936    o->addr1 = get_address(s, 0, r3, 0);
3937
3938    /* load the third operand into r3 before modifying anything */
3939    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
3940
3941    /* subtract CPU timer from first operand and store in GR0 */
3942    gen_helper_stpt(tmp, cpu_env);
3943    tcg_gen_sub_i64(regs[0], o->in1, tmp);
3944
3945    /* store second operand in GR1 */
3946    tcg_gen_mov_i64(regs[1], o->in2);
3947
3948    tcg_temp_free_i64(tmp);
3949    return NO_EXIT;
3950}
3951
3952#ifndef CONFIG_USER_ONLY
3953static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3954{
3955    check_privileged(s);
3956    tcg_gen_shri_i64(o->in2, o->in2, 4);
3957    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
3958    return NO_EXIT;
3959}
3960
3961static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3962{
3963    check_privileged(s);
3964    gen_helper_sske(cpu_env, o->in1, o->in2);
3965    return NO_EXIT;
3966}
3967
3968static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3969{
3970    check_privileged(s);
3971    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3972    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
3973    return EXIT_PC_STALE_NOCHAIN;
3974}
3975
3976static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3977{
3978    check_privileged(s);
3979    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
3980    return NO_EXIT;
3981}
3982
3983static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3984{
3985    gen_helper_stck(o->out, cpu_env);
3986    /* ??? We don't implement clock states.  */
3987    gen_op_movi_cc(s, 0);
3988    return NO_EXIT;
3989}
3990
3991static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3992{
3993    TCGv_i64 c1 = tcg_temp_new_i64();
3994    TCGv_i64 c2 = tcg_temp_new_i64();
3995    TCGv_i64 todpr = tcg_temp_new_i64();
3996    gen_helper_stck(c1, cpu_env);
3997    /* 16 bit value stored in a uint32_t (only valid bits set) */
3998    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
3999    /* Shift the 64-bit value into its place as a zero-extended
4000       104-bit value.  Note that "bit positions 64-103 are always
4001       non-zero so that they compare differently to STCK"; we set
4002       the least significant bit to 1.  */
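    /* The 16 bytes stored are thus: a zero epoch index and TOD bits 0-55
       (C1), then TOD bits 56-63, zeros with the low-order bit of the
       104-bit value set, and the TOD programmable field (C2).  */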
4003    tcg_gen_shli_i64(c2, c1, 56);
4004    tcg_gen_shri_i64(c1, c1, 8);
4005    tcg_gen_ori_i64(c2, c2, 0x10000);
4006    tcg_gen_or_i64(c2, c2, todpr);
4007    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4008    tcg_gen_addi_i64(o->in2, o->in2, 8);
4009    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4010    tcg_temp_free_i64(c1);
4011    tcg_temp_free_i64(c2);
4012    tcg_temp_free_i64(todpr);
4013    /* ??? We don't implement clock states.  */
4014    gen_op_movi_cc(s, 0);
4015    return NO_EXIT;
4016}
4017
4018static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
4019{
4020    check_privileged(s);
4021    gen_helper_sckc(cpu_env, o->in2);
4022    return NO_EXIT;
4023}
4024
4025static ExitStatus op_sckpf(DisasContext *s, DisasOps *o)
4026{
4027    check_privileged(s);
4028    gen_helper_sckpf(cpu_env, regs[0]);
4029    return NO_EXIT;
4030}
4031
4032static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
4033{
4034    check_privileged(s);
4035    gen_helper_stckc(o->out, cpu_env);
4036    return NO_EXIT;
4037}
4038
4039static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
4040{
4041    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4042    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4043    check_privileged(s);
4044    gen_helper_stctg(cpu_env, r1, o->in2, r3);
4045    tcg_temp_free_i32(r1);
4046    tcg_temp_free_i32(r3);
4047    return NO_EXIT;
4048}
4049
4050static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
4051{
4052    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4053    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4054    check_privileged(s);
4055    gen_helper_stctl(cpu_env, r1, o->in2, r3);
4056    tcg_temp_free_i32(r1);
4057    tcg_temp_free_i32(r3);
4058    return NO_EXIT;
4059}
4060
4061static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
4062{
4063    check_privileged(s);
4064    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4065    return NO_EXIT;
4066}
4067
4068static ExitStatus op_spt(DisasContext *s, DisasOps *o)
4069{
4070    check_privileged(s);
4071    gen_helper_spt(cpu_env, o->in2);
4072    return NO_EXIT;
4073}
4074
4075static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
4076{
4077    check_privileged(s);
4078    gen_helper_stfl(cpu_env);
4079    return NO_EXIT;
4080}
4081
4082static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
4083{
4084    check_privileged(s);
4085    gen_helper_stpt(o->out, cpu_env);
4086    return NO_EXIT;
4087}
4088
4089static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
4090{
4091    check_privileged(s);
4092    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4093    set_cc_static(s);
4094    return NO_EXIT;
4095}
4096
4097static ExitStatus op_spx(DisasContext *s, DisasOps *o)
4098{
4099    check_privileged(s);
4100    gen_helper_spx(cpu_env, o->in2);
4101    return NO_EXIT;
4102}
4103
4104static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
4105{
4106    check_privileged(s);
4107    gen_helper_xsch(cpu_env, regs[1]);
4108    set_cc_static(s);
4109    return NO_EXIT;
4110}
4111
4112static ExitStatus op_csch(DisasContext *s, DisasOps *o)
4113{
4114    check_privileged(s);
4115    gen_helper_csch(cpu_env, regs[1]);
4116    set_cc_static(s);
4117    return NO_EXIT;
4118}
4119
4120static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
4121{
4122    check_privileged(s);
4123    gen_helper_hsch(cpu_env, regs[1]);
4124    set_cc_static(s);
4125    return NO_EXIT;
4126}
4127
4128static ExitStatus op_msch(DisasContext *s, DisasOps *o)
4129{
4130    check_privileged(s);
4131    gen_helper_msch(cpu_env, regs[1], o->in2);
4132    set_cc_static(s);
4133    return NO_EXIT;
4134}
4135
4136static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
4137{
4138    check_privileged(s);
4139    gen_helper_rchp(cpu_env, regs[1]);
4140    set_cc_static(s);
4141    return NO_EXIT;
4142}
4143
4144static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
4145{
4146    check_privileged(s);
4147    gen_helper_rsch(cpu_env, regs[1]);
4148    set_cc_static(s);
4149    return NO_EXIT;
4150}
4151
4152static ExitStatus op_sal(DisasContext *s, DisasOps *o)
4153{
4154    check_privileged(s);
4155    gen_helper_sal(cpu_env, regs[1]);
4156    return NO_EXIT;
4157}
4158
4159static ExitStatus op_schm(DisasContext *s, DisasOps *o)
4160{
4161    check_privileged(s);
4162    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4163    return NO_EXIT;
4164}
4165
4166static ExitStatus op_siga(DisasContext *s, DisasOps *o)
4167{
4168    check_privileged(s);
4169    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4170    gen_op_movi_cc(s, 3);
4171    return NO_EXIT;
4172}
4173
4174static ExitStatus op_stcps(DisasContext *s, DisasOps *o)
4175{
4176    check_privileged(s);
4177    /* The instruction is suppressed if not provided. */
4178    return NO_EXIT;
4179}
4180
4181static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
4182{
4183    check_privileged(s);
4184    gen_helper_ssch(cpu_env, regs[1], o->in2);
4185    set_cc_static(s);
4186    return NO_EXIT;
4187}
4188
4189static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
4190{
4191    check_privileged(s);
4192    gen_helper_stsch(cpu_env, regs[1], o->in2);
4193    set_cc_static(s);
4194    return NO_EXIT;
4195}
4196
4197static ExitStatus op_stcrw(DisasContext *s, DisasOps *o)
4198{
4199    check_privileged(s);
4200    gen_helper_stcrw(cpu_env, o->in2);
4201    set_cc_static(s);
4202    return NO_EXIT;
4203}
4204
4205static ExitStatus op_tpi(DisasContext *s, DisasOps *o)
4206{
4207    check_privileged(s);
4208    gen_helper_tpi(cc_op, cpu_env, o->addr1);
4209    set_cc_static(s);
4210    return NO_EXIT;
4211}
4212
4213static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
4214{
4215    check_privileged(s);
4216    gen_helper_tsch(cpu_env, regs[1], o->in2);
4217    set_cc_static(s);
4218    return NO_EXIT;
4219}
4220
4221static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
4222{
4223    check_privileged(s);
4224    gen_helper_chsc(cpu_env, o->in2);
4225    set_cc_static(s);
4226    return NO_EXIT;
4227}
4228
4229static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
4230{
4231    check_privileged(s);
4232    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4233    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4234    return NO_EXIT;
4235}
4236
4237static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
4238{
4239    uint64_t i2 = get_field(s->fields, i2);
4240    TCGv_i64 t;
4241
4242    check_privileged(s);
4243
4244    /* It is important to do what the instruction name says: STORE THEN.
4245       If we let the output hook perform the store, then if we fault and
4246       restart, we'll have the wrong SYSTEM MASK in place.  */
4247    t = tcg_temp_new_i64();
4248    tcg_gen_shri_i64(t, psw_mask, 56);
4249    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4250    tcg_temp_free_i64(t);
4251
4252    if (s->fields->op == 0xac) {
4253        tcg_gen_andi_i64(psw_mask, psw_mask,
4254                         (i2 << 56) | 0x00ffffffffffffffull);
4255    } else {
4256        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4257    }
4258
4259    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4260    return EXIT_PC_STALE_NOCHAIN;
4261}
4262
4263static ExitStatus op_stura(DisasContext *s, DisasOps *o)
4264{
4265    check_privileged(s);
4266    gen_helper_stura(cpu_env, o->in2, o->in1);
4267    return NO_EXIT;
4268}
4269
4270static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
4271{
4272    check_privileged(s);
4273    gen_helper_sturg(cpu_env, o->in2, o->in1);
4274    return NO_EXIT;
4275}
4276#endif
4277
4278static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
4279{
4280    gen_helper_stfle(cc_op, cpu_env, o->in2);
4281    set_cc_static(s);
4282    return NO_EXIT;
4283}
4284
4285static ExitStatus op_st8(DisasContext *s, DisasOps *o)
4286{
4287    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4288    return NO_EXIT;
4289}
4290
4291static ExitStatus op_st16(DisasContext *s, DisasOps *o)
4292{
4293    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4294    return NO_EXIT;
4295}
4296
4297static ExitStatus op_st32(DisasContext *s, DisasOps *o)
4298{
4299    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4300    return NO_EXIT;
4301}
4302
4303static ExitStatus op_st64(DisasContext *s, DisasOps *o)
4304{
4305    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4306    return NO_EXIT;
4307}
4308
4309static ExitStatus op_stam(DisasContext *s, DisasOps *o)
4310{
4311    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4312    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4313    gen_helper_stam(cpu_env, r1, o->in2, r3);
4314    tcg_temp_free_i32(r1);
4315    tcg_temp_free_i32(r3);
4316    return NO_EXIT;
4317}
4318
4319static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
4320{
4321    int m3 = get_field(s->fields, m3);
4322    int pos, base = s->insn->data;
4323    TCGv_i64 tmp = tcg_temp_new_i64();
4324
4325    pos = base + ctz32(m3) * 8;
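    /* M3 selects, most significant bit first, which bytes of the 32-bit
       field at BASE are stored.  Contiguous masks are handled with a
       single wider store; the general case below stores the selected
       bytes one at a time, advancing the address only for bytes that
       are actually stored.  */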
4326    switch (m3) {
4327    case 0xf:
4328        /* Effectively a 32-bit store.  */
4329        tcg_gen_shri_i64(tmp, o->in1, pos);
4330        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4331        break;
4332
4333    case 0xc:
4334    case 0x6:
4335    case 0x3:
4336        /* Effectively a 16-bit store.  */
4337        tcg_gen_shri_i64(tmp, o->in1, pos);
4338        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4339        break;
4340
4341    case 0x8:
4342    case 0x4:
4343    case 0x2:
4344    case 0x1:
4345        /* Effectively an 8-bit store.  */
4346        tcg_gen_shri_i64(tmp, o->in1, pos);
4347        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4348        break;
4349
4350    default:
4351        /* This is going to be a sequence of shifts and stores.  */
4352        pos = base + 32 - 8;
4353        while (m3) {
4354            if (m3 & 0x8) {
4355                tcg_gen_shri_i64(tmp, o->in1, pos);
4356                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4357                tcg_gen_addi_i64(o->in2, o->in2, 1);
4358            }
4359            m3 = (m3 << 1) & 0xf;
4360            pos -= 8;
4361        }
4362        break;
4363    }
4364    tcg_temp_free_i64(tmp);
4365    return NO_EXIT;
4366}
4367
4368static ExitStatus op_stm(DisasContext *s, DisasOps *o)
4369{
4370    int r1 = get_field(s->fields, r1);
4371    int r3 = get_field(s->fields, r3);
4372    int size = s->insn->data;
4373    TCGv_i64 tsize = tcg_const_i64(size);
4374
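    /* Store registers R1 through R3, wrapping from 15 back to 0 and
       advancing the address by SIZE bytes per register stored.  */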
4375    while (1) {
4376        if (size == 8) {
4377            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4378        } else {
4379            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4380        }
4381        if (r1 == r3) {
4382            break;
4383        }
4384        tcg_gen_add_i64(o->in2, o->in2, tsize);
4385        r1 = (r1 + 1) & 15;
4386    }
4387
4388    tcg_temp_free_i64(tsize);
4389    return NO_EXIT;
4390}
4391
4392static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
4393{
4394    int r1 = get_field(s->fields, r1);
4395    int r3 = get_field(s->fields, r3);
4396    TCGv_i64 t = tcg_temp_new_i64();
4397    TCGv_i64 t4 = tcg_const_i64(4);
4398    TCGv_i64 t32 = tcg_const_i64(32);
4399
4400    while (1) {
4401        tcg_gen_shr_i64(t, regs[r1], t32);
4402        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4403        if (r1 == r3) {
4404            break;
4405        }
4406        tcg_gen_add_i64(o->in2, o->in2, t4);
4407        r1 = (r1 + 1) & 15;
4408    }
4409
4410    tcg_temp_free_i64(t);
4411    tcg_temp_free_i64(t4);
4412    tcg_temp_free_i64(t32);
4413    return NO_EXIT;
4414}
4415
4416static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
4417{
4418    if (tb_cflags(s->tb) & CF_PARALLEL) {
4419        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4420    } else {
4421        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4422    }
4423    return NO_EXIT;
4424}
4425
4426static ExitStatus op_srst(DisasContext *s, DisasOps *o)
4427{
4428    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4429    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4430
4431    gen_helper_srst(cpu_env, r1, r2);
4432
4433    tcg_temp_free_i32(r1);
4434    tcg_temp_free_i32(r2);
4435    set_cc_static(s);
4436    return NO_EXIT;
4437}
4438
4439static ExitStatus op_srstu(DisasContext *s, DisasOps *o)
4440{
4441    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4442    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4443
4444    gen_helper_srstu(cpu_env, r1, r2);
4445
4446    tcg_temp_free_i32(r1);
4447    tcg_temp_free_i32(r2);
4448    set_cc_static(s);
4449    return NO_EXIT;
4450}
4451
4452static ExitStatus op_sub(DisasContext *s, DisasOps *o)
4453{
4454    tcg_gen_sub_i64(o->out, o->in1, o->in2);
4455    return NO_EXIT;
4456}
4457
4458static ExitStatus op_subb(DisasContext *s, DisasOps *o)
4459{
4460    DisasCompare cmp;
4461    TCGv_i64 borrow;
4462
4463    tcg_gen_sub_i64(o->out, o->in1, o->in2);
4464
4465    /* The !borrow flag is the msb of CC.  Since we want the inverse of
4466       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
4467    disas_jcc(s, &cmp, 8 | 4);
4468    borrow = tcg_temp_new_i64();
4469    if (cmp.is_64) {
4470        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4471    } else {
4472        TCGv_i32 t = tcg_temp_new_i32();
4473        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4474        tcg_gen_extu_i32_i64(borrow, t);
4475        tcg_temp_free_i32(t);
4476    }
4477    free_compare(&cmp);
4478
4479    tcg_gen_sub_i64(o->out, o->out, borrow);
4480    tcg_temp_free_i64(borrow);
4481    return NO_EXIT;
4482}
4483
4484static ExitStatus op_svc(DisasContext *s, DisasOps *o)
4485{
4486    TCGv_i32 t;
4487
4488    update_psw_addr(s);
4489    update_cc_op(s);
4490
4491    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4492    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4493    tcg_temp_free_i32(t);
4494
4495    t = tcg_const_i32(s->ilen);
4496    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4497    tcg_temp_free_i32(t);
4498
4499    gen_exception(EXCP_SVC);
4500    return EXIT_NORETURN;
4501}
4502
4503static ExitStatus op_tam(DisasContext *s, DisasOps *o)
4504{
4505    int cc = 0;
4506
4507    cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
4508    cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
4509    gen_op_movi_cc(s, cc);
4510    return NO_EXIT;
4511}
4512
4513static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
4514{
4515    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4516    set_cc_static(s);
4517    return NO_EXIT;
4518}
4519
4520static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
4521{
4522    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4523    set_cc_static(s);
4524    return NO_EXIT;
4525}
4526
4527static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
4528{
4529    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4530    set_cc_static(s);
4531    return NO_EXIT;
4532}
4533
4534#ifndef CONFIG_USER_ONLY
4535
4536static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
4537{
4538    check_privileged(s);
4539    gen_helper_testblock(cc_op, cpu_env, o->in2);
4540    set_cc_static(s);
4541    return NO_EXIT;
4542}
4543
4544static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
4545{
4546    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4547    set_cc_static(s);
4548    return NO_EXIT;
4549}
4550
4551#endif
4552
4553static ExitStatus op_tp(DisasContext *s, DisasOps *o)
4554{
4555    TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4556    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4557    tcg_temp_free_i32(l1);
4558    set_cc_static(s);
4559    return NO_EXIT;
4560}
4561
4562static ExitStatus op_tr(DisasContext *s, DisasOps *o)
4563{
4564    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4565    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4566    tcg_temp_free_i32(l);
4567    set_cc_static(s);
4568    return NO_EXIT;
4569}
4570
4571static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4572{
4573    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4574    return_low128(o->out2);
4575    set_cc_static(s);
4576    return NO_EXIT;
4577}
4578
4579static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4580{
4581    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4582    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4583    tcg_temp_free_i32(l);
4584    set_cc_static(s);
4585    return NO_EXIT;
4586}
4587
4588static ExitStatus op_trtr(DisasContext *s, DisasOps *o)
4589{
4590    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4591    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4592    tcg_temp_free_i32(l);
4593    set_cc_static(s);
4594    return NO_EXIT;
4595}
4596
4597static ExitStatus op_trXX(DisasContext *s, DisasOps *o)
4598{
4599    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4600    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4601    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4602    TCGv_i32 tst = tcg_temp_new_i32();
4603    int m3 = get_field(s->fields, m3);
4604
4605    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4606        m3 = 0;
4607    }
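    /* With the ETF2-enhancement facility, the low bit of M3 requests
       operation without a test character; -1 can never match a
       zero-extended 8- or 16-bit character.  Otherwise the test
       character is taken from R0.  */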
4608    if (m3 & 1) {
4609        tcg_gen_movi_i32(tst, -1);
4610    } else {
4611        tcg_gen_extrl_i64_i32(tst, regs[0]);
4612        if (s->insn->opc & 3) {
4613            tcg_gen_ext8u_i32(tst, tst);
4614        } else {
4615            tcg_gen_ext16u_i32(tst, tst);
4616        }
4617    }
4618    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4619
4620    tcg_temp_free_i32(r1);
4621    tcg_temp_free_i32(r2);
4622    tcg_temp_free_i32(sizes);
4623    tcg_temp_free_i32(tst);
4624    set_cc_static(s);
4625    return NO_EXIT;
4626}
4627
4628static ExitStatus op_ts(DisasContext *s, DisasOps *o)
4629{
4630    TCGv_i32 t1 = tcg_const_i32(0xff);
4631    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4632    tcg_gen_extract_i32(cc_op, t1, 7, 1);
4633    tcg_temp_free_i32(t1);
4634    set_cc_static(s);
4635    return NO_EXIT;
4636}
4637
4638static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4639{
4640    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4641    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4642    tcg_temp_free_i32(l);
4643    return NO_EXIT;
4644}
4645
4646static ExitStatus op_unpka(DisasContext *s, DisasOps *o)
4647{
4648    int l1 = get_field(s->fields, l1) + 1;
4649    TCGv_i32 l;
4650
4651    /* The length must not exceed 32 bytes.  */
4652    if (l1 > 32) {
4653        gen_program_exception(s, PGM_SPECIFICATION);
4654        return EXIT_NORETURN;
4655    }
4656    l = tcg_const_i32(l1);
4657    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4658    tcg_temp_free_i32(l);
4659    set_cc_static(s);
4660    return NO_EXIT;
4661}
4662
4663static ExitStatus op_unpku(DisasContext *s, DisasOps *o)
4664{
4665    int l1 = get_field(s->fields, l1) + 1;
4666    TCGv_i32 l;
4667
4668    /* The length must be even and must not exceed 64 bytes.  */
4669    if ((l1 & 1) || (l1 > 64)) {
4670        gen_program_exception(s, PGM_SPECIFICATION);
4671        return EXIT_NORETURN;
4672    }
4673    l = tcg_const_i32(l1);
4674    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4675    tcg_temp_free_i32(l);
4676    set_cc_static(s);
4677    return NO_EXIT;
4678}
4679
4680
4681static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4682{
4683    int d1 = get_field(s->fields, d1);
4684    int d2 = get_field(s->fields, d2);
4685    int b1 = get_field(s->fields, b1);
4686    int b2 = get_field(s->fields, b2);
4687    int l = get_field(s->fields, l1);
4688    TCGv_i32 t32;
4689
4690    o->addr1 = get_address(s, 0, b1, d1);
4691
4692    /* If the addresses are identical, this is a store/memset of zero.  */
4693    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4694        o->in2 = tcg_const_i64(0);
4695
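        /* L is a length-minus-one field; adjust it, then clear the region
           using the widest stores available (8, 4, 2, then 1 bytes).  */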
4696        l++;
4697        while (l >= 8) {
4698            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4699            l -= 8;
4700            if (l > 0) {
4701                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4702            }
4703        }
4704        if (l >= 4) {
4705            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4706            l -= 4;
4707            if (l > 0) {
4708                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4709            }
4710        }
4711        if (l >= 2) {
4712            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4713            l -= 2;
4714            if (l > 0) {
4715                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4716            }
4717        }
4718        if (l) {
4719            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4720        }
4721        gen_op_movi_cc(s, 0);
4722        return NO_EXIT;
4723    }
4724
4725    /* But in general we'll defer to a helper.  */
4726    o->in2 = get_address(s, 0, b2, d2);
4727    t32 = tcg_const_i32(l);
4728    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4729    tcg_temp_free_i32(t32);
4730    set_cc_static(s);
4731    return NO_EXIT;
4732}
4733
4734static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4735{
4736    tcg_gen_xor_i64(o->out, o->in1, o->in2);
4737    return NO_EXIT;
4738}
4739
4740static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4741{
4742    int shift = s->insn->data & 0xff;
4743    int size = s->insn->data >> 8;
4744    uint64_t mask = ((1ull << size) - 1) << shift;
4745
4746    assert(!o->g_in2);
4747    tcg_gen_shli_i64(o->in2, o->in2, shift);
4748    tcg_gen_xor_i64(o->out, o->in1, o->in2);
4749
4750    /* Produce the CC from only the bits manipulated.  */
4751    tcg_gen_andi_i64(cc_dst, o->out, mask);
4752    set_cc_nz_u64(s, cc_dst);
4753    return NO_EXIT;
4754}
4755
4756static ExitStatus op_xi(DisasContext *s, DisasOps *o)
4757{
4758    o->in1 = tcg_temp_new_i64();
4759
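    /* Without the interlocked-access facility, the read-modify-write is
       done with a separate load and store; with it, a single atomic
       fetch-xor updates memory and yields the old value.  */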
4760    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4761        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4762    } else {
4763        /* Perform the atomic operation in memory. */
4764        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4765                                     s->insn->data);
4766    }
4767
4768    /* Recompute the result also in the atomic case; needed to set the CC. */
4769    tcg_gen_xor_i64(o->out, o->in1, o->in2);
4770
4771    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4772        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4773    }
4774    return NO_EXIT;
4775}
4776
4777static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4778{
4779    o->out = tcg_const_i64(0);
4780    return NO_EXIT;
4781}
4782
4783static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4784{
4785    o->out = tcg_const_i64(0);
4786    o->out2 = o->out;
4787    o->g_out2 = true;
4788    return NO_EXIT;
4789}
4790
4791#ifndef CONFIG_USER_ONLY
4792static ExitStatus op_clp(DisasContext *s, DisasOps *o)
4793{
4794    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4795
4796    check_privileged(s);
4797    gen_helper_clp(cpu_env, r2);
4798    tcg_temp_free_i32(r2);
4799    set_cc_static(s);
4800    return NO_EXIT;
4801}
4802
4803static ExitStatus op_pcilg(DisasContext *s, DisasOps *o)
4804{
4805    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4806    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4807
4808    check_privileged(s);
4809    gen_helper_pcilg(cpu_env, r1, r2);
4810    tcg_temp_free_i32(r1);
4811    tcg_temp_free_i32(r2);
4812    set_cc_static(s);
4813    return NO_EXIT;
4814}
4815
4816static ExitStatus op_pcistg(DisasContext *s, DisasOps *o)
4817{
4818    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4819    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4820
4821    check_privileged(s);
4822    gen_helper_pcistg(cpu_env, r1, r2);
4823    tcg_temp_free_i32(r1);
4824    tcg_temp_free_i32(r2);
4825    set_cc_static(s);
4826    return NO_EXIT;
4827}
4828
4829static ExitStatus op_stpcifc(DisasContext *s, DisasOps *o)
4830{
4831    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4832    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4833
4834    check_privileged(s);
4835    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
4836    tcg_temp_free_i32(ar);
4837    tcg_temp_free_i32(r1);
4838    set_cc_static(s);
4839    return NO_EXIT;
4840}
4841
4842static ExitStatus op_sic(DisasContext *s, DisasOps *o)
4843{
4844    check_privileged(s);
4845    gen_helper_sic(cpu_env, o->in1, o->in2);
4846    return NO_EXIT;
4847}
4848
4849static ExitStatus op_rpcit(DisasContext *s, DisasOps *o)
4850{
4851    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4852    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4853
4854    check_privileged(s);
4855    gen_helper_rpcit(cpu_env, r1, r2);
4856    tcg_temp_free_i32(r1);
4857    tcg_temp_free_i32(r2);
4858    set_cc_static(s);
4859    return NO_EXIT;
4860}
4861
4862static ExitStatus op_pcistb(DisasContext *s, DisasOps *o)
4863{
4864    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4865    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4866    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4867
4868    check_privileged(s);
4869    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
4870    tcg_temp_free_i32(ar);
4871    tcg_temp_free_i32(r1);
4872    tcg_temp_free_i32(r3);
4873    set_cc_static(s);
4874    return NO_EXIT;
4875}
4876
4877static ExitStatus op_mpcifc(DisasContext *s, DisasOps *o)
4878{
4879    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4880    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));
4881
4882    check_privileged(s);
4883    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
4884    tcg_temp_free_i32(ar);
4885    tcg_temp_free_i32(r1);
4886    set_cc_static(s);
4887    return NO_EXIT;
4888}
4889#endif
4890
4891/* ====================================================================== */
4892/* The "Cc OUTput" generators.  Given the generated output (and in some cases
4893   the original inputs), update the various cc data structures in order to
4894   be able to compute the new condition code.  */
4895
4896static void cout_abs32(DisasContext *s, DisasOps *o)
4897{
4898    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4899}
4900
4901static void cout_abs64(DisasContext *s, DisasOps *o)
4902{
4903    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4904}
4905
4906static void cout_adds32(DisasContext *s, DisasOps *o)
4907{
4908    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4909}
4910
4911static void cout_adds64(DisasContext *s, DisasOps *o)
4912{
4913    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4914}
4915
4916static void cout_addu32(DisasContext *s, DisasOps *o)
4917{
4918    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4919}
4920
4921static void cout_addu64(DisasContext *s, DisasOps *o)
4922{
4923    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4924}
4925
4926static void cout_addc32(DisasContext *s, DisasOps *o)
4927{
4928    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4929}
4930
4931static void cout_addc64(DisasContext *s, DisasOps *o)
4932{
4933    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4934}
4935
4936static void cout_cmps32(DisasContext *s, DisasOps *o)
4937{
4938    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4939}
4940
4941static void cout_cmps64(DisasContext *s, DisasOps *o)
4942{
4943    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4944}
4945
4946static void cout_cmpu32(DisasContext *s, DisasOps *o)
4947{
4948    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4949}
4950
4951static void cout_cmpu64(DisasContext *s, DisasOps *o)
4952{
4953    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4954}
4955
4956static void cout_f32(DisasContext *s, DisasOps *o)
4957{
4958    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4959}
4960
4961static void cout_f64(DisasContext *s, DisasOps *o)
4962{
4963    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4964}
4965
4966static void cout_f128(DisasContext *s, DisasOps *o)
4967{
4968    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4969}
4970
4971static void cout_nabs32(DisasContext *s, DisasOps *o)
4972{
4973    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4974}
4975
4976static void cout_nabs64(DisasContext *s, DisasOps *o)
4977{
4978    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4979}
4980
4981static void cout_neg32(DisasContext *s, DisasOps *o)
4982{
4983    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4984}
4985
4986static void cout_neg64(DisasContext *s, DisasOps *o)
4987{
4988    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4989}
4990
4991static void cout_nz32(DisasContext *s, DisasOps *o)
4992{
4993    tcg_gen_ext32u_i64(cc_dst, o->out);
4994    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4995}
4996
4997static void cout_nz64(DisasContext *s, DisasOps *o)
4998{
4999    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5000}
5001
5002static void cout_s32(DisasContext *s, DisasOps *o)
5003{
5004    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5005}
5006
5007static void cout_s64(DisasContext *s, DisasOps *o)
5008{
5009    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5010}
5011
5012static void cout_subs32(DisasContext *s, DisasOps *o)
5013{
5014    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5015}
5016
5017static void cout_subs64(DisasContext *s, DisasOps *o)
5018{
5019    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5020}
5021
5022static void cout_subu32(DisasContext *s, DisasOps *o)
5023{
5024    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
5025}
5026
5027static void cout_subu64(DisasContext *s, DisasOps *o)
5028{
5029    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
5030}
5031
5032static void cout_subb32(DisasContext *s, DisasOps *o)
5033{
5034    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
5035}
5036
5037static void cout_subb64(DisasContext *s, DisasOps *o)
5038{
5039    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
5040}
5041
5042static void cout_tm32(DisasContext *s, DisasOps *o)
5043{
5044    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5045}
5046
5047static void cout_tm64(DisasContext *s, DisasOps *o)
5048{
5049    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5050}
5051
5052/* ====================================================================== */
5053/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5054   with the TCG register to which we will write.  Used in combination with
5055   the "wout" generators, in some cases we need a new temporary, and in
5056   some cases we can write to a TCG global.  */
5057
5058static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
5059{
5060    o->out = tcg_temp_new_i64();
5061}
5062#define SPEC_prep_new 0
5063
5064static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
5065{
5066    o->out = tcg_temp_new_i64();
5067    o->out2 = tcg_temp_new_i64();
5068}
5069#define SPEC_prep_new_P 0
5070
5071static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5072{
5073    o->out = regs[get_field(f, r1)];
5074    o->g_out = true;
5075}
5076#define SPEC_prep_r1 0
5077
5078static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
5079{
5080    int r1 = get_field(f, r1);
5081    o->out = regs[r1];
5082    o->out2 = regs[r1 + 1];
5083    o->g_out = o->g_out2 = true;
5084}
5085#define SPEC_prep_r1_P SPEC_r1_even
5086
5087static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5088{
5089    o->out = fregs[get_field(f, r1)];
5090    o->g_out = true;
5091}
5092#define SPEC_prep_f1 0
5093
5094static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5095{
5096    int r1 = get_field(f, r1);
5097    o->out = fregs[r1];
5098    o->out2 = fregs[r1 + 2];
5099    o->g_out = o->g_out2 = true;
5100}
5101#define SPEC_prep_x1 SPEC_r1_f128
5102
5103/* ====================================================================== */
5104/* The "Write OUTput" generators.  These generally perform some non-trivial
5105   copy of data to TCG globals, or to main memory.  The trivial cases are
5106   generally handled by having a "prep" generator install the TCG global
5107   as the destination of the operation.  */
5108
5109static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5110{
5111    store_reg(get_field(f, r1), o->out);
5112}
5113#define SPEC_wout_r1 0
5114
5115static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5116{
5117    int r1 = get_field(f, r1);
5118    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5119}
5120#define SPEC_wout_r1_8 0
5121
5122static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5123{
5124    int r1 = get_field(f, r1);
5125    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5126}
5127#define SPEC_wout_r1_16 0
5128
5129static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5130{
5131    store_reg32_i64(get_field(f, r1), o->out);
5132}
5133#define SPEC_wout_r1_32 0
5134
5135static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
5136{
5137    store_reg32h_i64(get_field(f, r1), o->out);
5138}
5139#define SPEC_wout_r1_32h 0
5140
5141static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5142{
5143    int r1 = get_field(f, r1);
5144    store_reg32_i64(r1, o->out);
5145    store_reg32_i64(r1 + 1, o->out2);
5146}
5147#define SPEC_wout_r1_P32 SPEC_r1_even
5148
5149static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5150{
5151    int r1 = get_field(f, r1);
5152    store_reg32_i64(r1 + 1, o->out);
5153    tcg_gen_shri_i64(o->out, o->out, 32);
5154    store_reg32_i64(r1, o->out);
5155}
5156#define SPEC_wout_r1_D32 SPEC_r1_even
5157
5158static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
5159{
5160    int r3 = get_field(f, r3);
5161    store_reg32_i64(r3, o->out);
5162    store_reg32_i64(r3 + 1, o->out2);
5163}
5164#define SPEC_wout_r3_P32 SPEC_r3_even
5165
5166static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
5167{
5168    int r3 = get_field(f, r3);
5169    store_reg(r3, o->out);
5170    store_reg(r3 + 1, o->out2);
5171}
5172#define SPEC_wout_r3_P64 SPEC_r3_even
5173
5174static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5175{
5176    store_freg32_i64(get_field(f, r1), o->out);
5177}
5178#define SPEC_wout_e1 0
5179
5180static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
5181{
5182    store_freg(get_field(f, r1), o->out);
5183}
5184#define SPEC_wout_f1 0
5185
5186static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
5187{
5188    int f1 = get_field(s->fields, r1);
5189    store_freg(f1, o->out);
5190    store_freg(f1 + 2, o->out2);
5191}
5192#define SPEC_wout_x1 SPEC_r1_f128
5193
5194static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5195{
5196    if (get_field(f, r1) != get_field(f, r2)) {
5197        store_reg32_i64(get_field(f, r1), o->out);
5198    }
5199}
5200#define SPEC_wout_cond_r1r2_32 0
5201
5202static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
5203{
5204    if (get_field(f, r1) != get_field(f, r2)) {
5205        store_freg32_i64(get_field(f, r1), o->out);
5206    }
5207}
5208#define SPEC_wout_cond_e1e2 0
5209
5210static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
5211{
5212    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5213}
5214#define SPEC_wout_m1_8 0
5215
5216static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
5217{
5218    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5219}
5220#define SPEC_wout_m1_16 0
5221
5222#ifndef CONFIG_USER_ONLY
5223static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
5224{
5225    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5226}
5227#define SPEC_wout_m1_16a 0
5228#endif
5229
5230static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5231{
5232    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5233}
5234#define SPEC_wout_m1_32 0
5235
5236#ifndef CONFIG_USER_ONLY
5237static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
5238{
5239    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5240}
5241#define SPEC_wout_m1_32a 0
5242#endif
5243
5244static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5245{
5246    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5247}
5248#define SPEC_wout_m1_64 0
5249
5250#ifndef CONFIG_USER_ONLY
5251static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
5252{
5253    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
5254}
5255#define SPEC_wout_m1_64a 0
5256#endif
5257
5258static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
5259{
5260    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5261}
5262#define SPEC_wout_m2_32 0
5263
5264static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5265{
5266    store_reg(get_field(f, r1), o->in2);
5267}
5268#define SPEC_wout_in2_r1 0
5269
5270static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
5271{
5272    store_reg32_i64(get_field(f, r1), o->in2);
5273}
5274#define SPEC_wout_in2_r1_32 0
5275
5276/* ====================================================================== */
5277/* The "INput 1" generators.  These load the first operand to an insn.  */
5278
5279static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5280{
5281    o->in1 = load_reg(get_field(f, r1));
5282}
5283#define SPEC_in1_r1 0
5284
5285static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5286{
5287    o->in1 = regs[get_field(f, r1)];
5288    o->g_in1 = true;
5289}
5290#define SPEC_in1_r1_o 0
5291
5292static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5293{
5294    o->in1 = tcg_temp_new_i64();
5295    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
5296}
5297#define SPEC_in1_r1_32s 0
5298
5299static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5300{
5301    o->in1 = tcg_temp_new_i64();
5302    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
5303}
5304#define SPEC_in1_r1_32u 0
5305
5306static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5307{
5308    o->in1 = tcg_temp_new_i64();
5309    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
5310}
5311#define SPEC_in1_r1_sr32 0
5312
5313static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
5314{
5315    o->in1 = load_reg(get_field(f, r1) + 1);
5316}
5317#define SPEC_in1_r1p1 SPEC_r1_even
5318
5319static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5320{
5321    o->in1 = tcg_temp_new_i64();
5322    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
5323}
5324#define SPEC_in1_r1p1_32s SPEC_r1_even
5325
5326static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5327{
5328    o->in1 = tcg_temp_new_i64();
5329    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
5330}
5331#define SPEC_in1_r1p1_32u SPEC_r1_even
5332
5333static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5334{
5335    int r1 = get_field(f, r1);
5336    o->in1 = tcg_temp_new_i64();
5337    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5338}
5339#define SPEC_in1_r1_D32 SPEC_r1_even
5340
5341static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5342{
5343    o->in1 = load_reg(get_field(f, r2));
5344}
5345#define SPEC_in1_r2 0
5346
5347static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5348{
5349    o->in1 = tcg_temp_new_i64();
5350    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
5351}
5352#define SPEC_in1_r2_sr32 0
5353
5354static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5355{
5356    o->in1 = load_reg(get_field(f, r3));
5357}
5358#define SPEC_in1_r3 0
5359
5360static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5361{
5362    o->in1 = regs[get_field(f, r3)];
5363    o->g_in1 = true;
5364}
5365#define SPEC_in1_r3_o 0
5366
5367static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5368{
5369    o->in1 = tcg_temp_new_i64();
5370    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
5371}
5372#define SPEC_in1_r3_32s 0
5373
5374static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5375{
5376    o->in1 = tcg_temp_new_i64();
5377    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
5378}
5379#define SPEC_in1_r3_32u 0
5380
5381static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5382{
5383    int r3 = get_field(f, r3);
5384    o->in1 = tcg_temp_new_i64();
5385    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5386}
5387#define SPEC_in1_r3_D32 SPEC_r3_even
5388
5389static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5390{
5391    o->in1 = load_freg32_i64(get_field(f, r1));
5392}
5393#define SPEC_in1_e1 0
5394
5395static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5396{
5397    o->in1 = fregs[get_field(f, r1)];
5398    o->g_in1 = true;
5399}
5400#define SPEC_in1_f1_o 0
5401
5402static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5403{
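    /* Note that the 128-bit register pair deliberately lands in OUT/OUT2
       rather than IN1/IN2; the 128-bit helpers (op_sxb and op_tcxb above,
       for example) read this operand from the output fields.  */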
5404    int r1 = get_field(f, r1);
5405    o->out = fregs[r1];
5406    o->out2 = fregs[r1 + 2];
5407    o->g_out = o->g_out2 = true;
5408}
5409#define SPEC_in1_x1_o SPEC_r1_f128
5410
5411static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5412{
5413    o->in1 = fregs[get_field(f, r3)];
5414    o->g_in1 = true;
5415}
5416#define SPEC_in1_f3_o 0
5417
5418static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
5419{
5420    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
5421}
5422#define SPEC_in1_la1 0
5423
5424static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
5425{
5426    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5427    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5428}
5429#define SPEC_in1_la2 0
5430
5431static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5432{
5433    in1_la1(s, f, o);
5434    o->in1 = tcg_temp_new_i64();
5435    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5436}
5437#define SPEC_in1_m1_8u 0
5438
5439static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5440{
5441    in1_la1(s, f, o);
5442    o->in1 = tcg_temp_new_i64();
5443    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5444}
5445#define SPEC_in1_m1_16s 0
5446
5447static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5448{
5449    in1_la1(s, f, o);
5450    o->in1 = tcg_temp_new_i64();
5451    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5452}
5453#define SPEC_in1_m1_16u 0
5454
5455static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5456{
5457    in1_la1(s, f, o);
5458    o->in1 = tcg_temp_new_i64();
5459    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5460}
5461#define SPEC_in1_m1_32s 0
5462
5463static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5464{
5465    in1_la1(s, f, o);
5466    o->in1 = tcg_temp_new_i64();
5467    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5468}
5469#define SPEC_in1_m1_32u 0
5470
5471static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5472{
5473    in1_la1(s, f, o);
5474    o->in1 = tcg_temp_new_i64();
5475    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5476}
5477#define SPEC_in1_m1_64 0
5478
5479/* ====================================================================== */
5480/* The "INput 2" generators.  These load the second operand to an insn.  */
5481
5482static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5483{
5484    o->in2 = regs[get_field(f, r1)];
5485    o->g_in2 = true;
5486}
5487#define SPEC_in2_r1_o 0
5488
5489static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5490{
5491    o->in2 = tcg_temp_new_i64();
5492    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
5493}
5494#define SPEC_in2_r1_16u 0
5495
5496static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5497{
5498    o->in2 = tcg_temp_new_i64();
5499    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
5500}
5501#define SPEC_in2_r1_32u 0
5502
5503static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5504{
5505    int r1 = get_field(f, r1);
5506    o->in2 = tcg_temp_new_i64();
5507    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5508}
5509#define SPEC_in2_r1_D32 SPEC_r1_even
5510
5511static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5512{
5513    o->in2 = load_reg(get_field(f, r2));
5514}
5515#define SPEC_in2_r2 0
5516
5517static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5518{
5519    o->in2 = regs[get_field(f, r2)];
5520    o->g_in2 = true;
5521}
5522#define SPEC_in2_r2_o 0
5523
5524static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
5525{
5526    int r2 = get_field(f, r2);
5527    if (r2 != 0) {
5528        o->in2 = load_reg(r2);
5529    }
5530}
5531#define SPEC_in2_r2_nz 0
5532
5533static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
5534{
5535    o->in2 = tcg_temp_new_i64();
5536    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
5537}
5538#define SPEC_in2_r2_8s 0
5539
5540static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5541{
5542    o->in2 = tcg_temp_new_i64();
5543    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
5544}
5545#define SPEC_in2_r2_8u 0
5546
5547static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5548{
5549    o->in2 = tcg_temp_new_i64();
5550    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
5551}
5552#define SPEC_in2_r2_16s 0
5553
5554static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5555{
5556    o->in2 = tcg_temp_new_i64();
5557    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
5558}
5559#define SPEC_in2_r2_16u 0
5560
5561static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5562{
5563    o->in2 = load_reg(get_field(f, r3));
5564}
5565#define SPEC_in2_r3 0
5566
5567static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5568{
5569    o->in2 = tcg_temp_new_i64();
5570    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
5571}
5572#define SPEC_in2_r3_sr32 0
5573
5574static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5575{
5576    o->in2 = tcg_temp_new_i64();
5577    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
5578}
5579#define SPEC_in2_r2_32s 0
5580
5581static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5582{
5583    o->in2 = tcg_temp_new_i64();
5584    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
5585}
5586#define SPEC_in2_r2_32u 0
5587
5588static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5589{
5590    o->in2 = tcg_temp_new_i64();
5591    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
5592}
5593#define SPEC_in2_r2_sr32 0
5594
5595static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
5596{
5597    o->in2 = load_freg32_i64(get_field(f, r2));
5598}
5599#define SPEC_in2_e2 0
5600
5601static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5602{
5603    o->in2 = fregs[get_field(f, r2)];
5604    o->g_in2 = true;
5605}
5606#define SPEC_in2_f2_o 0
5607
5608static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5609{
5610    int r2 = get_field(f, r2);
5611    o->in1 = fregs[r2];
5612    o->in2 = fregs[r2 + 2];
5613    o->g_in1 = o->g_in2 = true;
5614}
5615#define SPEC_in2_x2_o SPEC_r2_f128
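
    /*
     * Note (illustrative, derived from the code above): a 128-bit extended
     * operand is picked up as the register pair fregs[r2] / fregs[r2 + 2],
     * so SPEC_r2_f128 (enforced in translate_one) rejects r2 > 13 to keep
     * the implicit second half inside the 16 floating-point registers.
     */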
5616
5617static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
5618{
5619    o->in2 = get_address(s, 0, get_field(f, r2), 0);
5620}
5621#define SPEC_in2_ra2 0
5622
5623static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
5624{
5625    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5626    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5627}
5628#define SPEC_in2_a2 0
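
    /*
     * A worked example of the address formation above, assuming get_address()
     * (defined earlier in this file) forms base + index + displacement with
     * register number 0 meaning "no register", as usual on s390x: for b2 = 5,
     * x2 = 7, d2 = 0x100 the operand address is
     *
     *     regs[7] + regs[5] + 0x100
     *
     * truncated according to the current addressing mode.
     */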
5629
5630static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
5631{
5632    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
5633}
5634#define SPEC_in2_ri2 0
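
    /*
     * The immediate here is a signed halfword count relative to the address
     * of the current insn.  For example, with s->pc = 0x1000 and i2 = 0x100
     * the generated constant is 0x1000 + 0x100 * 2 = 0x1200; the in2_mri2_*
     * helpers below then load through that address.
     */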
5635
5636static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
5637{
5638    help_l2_shift(s, f, o, 31);
5639}
5640#define SPEC_in2_sh32 0
5641
5642static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
5643{
5644    help_l2_shift(s, f, o, 63);
5645}
5646#define SPEC_in2_sh64 0
5647
5648static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5649{
5650    in2_a2(s, f, o);
5651    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5652}
5653#define SPEC_in2_m2_8u 0
5654
5655static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5656{
5657    in2_a2(s, f, o);
5658    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5659}
5660#define SPEC_in2_m2_16s 0
5661
5662static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5663{
5664    in2_a2(s, f, o);
5665    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5666}
5667#define SPEC_in2_m2_16u 0
5668
5669static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5670{
5671    in2_a2(s, f, o);
5672    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5673}
5674#define SPEC_in2_m2_32s 0
5675
5676static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5677{
5678    in2_a2(s, f, o);
5679    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5680}
5681#define SPEC_in2_m2_32u 0
5682
5683#ifndef CONFIG_USER_ONLY
5684static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o)
5685{
5686    in2_a2(s, f, o);
5687    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5688}
5689#define SPEC_in2_m2_32ua 0
5690#endif
5691
5692static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5693{
5694    in2_a2(s, f, o);
5695    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5696}
5697#define SPEC_in2_m2_64 0
5698
5699#ifndef CONFIG_USER_ONLY
5700static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o)
5701{
5702    in2_a2(s, f, o);
5703    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
5704}
5705#define SPEC_in2_m2_64a 0
5706#endif
5707
5708static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5709{
5710    in2_ri2(s, f, o);
5711    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5712}
5713#define SPEC_in2_mri2_16u 0
5714
5715static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5716{
5717    in2_ri2(s, f, o);
5718    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5719}
5720#define SPEC_in2_mri2_32s 0
5721
5722static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5723{
5724    in2_ri2(s, f, o);
5725    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5726}
5727#define SPEC_in2_mri2_32u 0
5728
5729static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5730{
5731    in2_ri2(s, f, o);
5732    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5733}
5734#define SPEC_in2_mri2_64 0
5735
5736static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
5737{
5738    o->in2 = tcg_const_i64(get_field(f, i2));
5739}
5740#define SPEC_in2_i2 0
5741
5742static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5743{
5744    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
5745}
5746#define SPEC_in2_i2_8u 0
5747
5748static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5749{
5750    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
5751}
5752#define SPEC_in2_i2_16u 0
5753
5754static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5755{
5756    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
5757}
5758#define SPEC_in2_i2_32u 0
5759
5760static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5761{
5762    uint64_t i2 = (uint16_t)get_field(f, i2);
5763    o->in2 = tcg_const_i64(i2 << s->insn->data);
5764}
5765#define SPEC_in2_i2_16u_shl 0
5766
5767static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5768{
5769    uint64_t i2 = (uint32_t)get_field(f, i2);
5770    o->in2 = tcg_const_i64(i2 << s->insn->data);
5771}
5772#define SPEC_in2_i2_32u_shl 0
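
    /*
     * For the *_shl variants the shift amount comes from the .data column of
     * the insn table (s->insn->data).  E.g. with i2 = 0x1234 and data = 16
     * the operand becomes 0x12340000; with data = 48 it becomes
     * 0x1234000000000000.
     */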
5773
5774#ifndef CONFIG_USER_ONLY
5775static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
5776{
5777    o->in2 = tcg_const_i64(s->fields->raw_insn);
5778}
5779#define SPEC_in2_insn 0
5780#endif
5781
5782/* ====================================================================== */
5783
5784/* Find opc within the table of insns.  This is formulated as a switch
5785   statement so that (1) we get compile-time notice of cut-and-paste errors
5786   for duplicated opcodes, and (2) the compiler generates the binary
5787   search tree, rather than us having to post-process the table.  */
5788
5789#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5790    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5791
5792#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5793
5794enum DisasInsnEnum {
5795#include "insn-data.def"
5796};
5797
5798#undef D
5799#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
5800    .opc = OPC,                                                             \
5801    .fmt = FMT_##FT,                                                        \
5802    .fac = FAC_##FC,                                                        \
5803    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
5804    .name = #NM,                                                            \
5805    .help_in1 = in1_##I1,                                                   \
5806    .help_in2 = in2_##I2,                                                   \
5807    .help_prep = prep_##P,                                                  \
5808    .help_wout = wout_##W,                                                  \
5809    .help_cout = cout_##CC,                                                 \
5810    .help_op = op_##OP,                                                     \
5811    .data = D                                                               \
5812 },
5813
5814/* Allow 0 to be used for NULL in the table below.  */
5815#define in1_0  NULL
5816#define in2_0  NULL
5817#define prep_0  NULL
5818#define wout_0  NULL
5819#define cout_0  NULL
5820#define op_0  NULL
5821
5822#define SPEC_in1_0 0
5823#define SPEC_in2_0 0
5824#define SPEC_prep_0 0
5825#define SPEC_wout_0 0
5826
5827/* Give smaller names to the various facilities.  */
5828#define FAC_Z           S390_FEAT_ZARCH
5829#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5830#define FAC_DFP         S390_FEAT_DFP
5831#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5832#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
5833#define FAC_EE          S390_FEAT_EXECUTE_EXT
5834#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
5835#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
5836#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5837#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5838#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5839#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
5840#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
5841#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5842#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5843#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
5844#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
5845#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
5846#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
5847#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
5848#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
5849#define FAC_SFLE        S390_FEAT_STFLE
5850#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5851#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5852#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5853#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
5854#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
5855#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
5856#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
5857#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5858#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
5859#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
5860#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
5861#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
5862#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
5863#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
5864#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
5865#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
5866
5867static const DisasInsn insn_info[] = {
5868#include "insn-data.def"
5869};
5870
5871#undef D
5872#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5873    case OPC: return &insn_info[insn_ ## NM];
5874
5875static const DisasInsn *lookup_opc(uint16_t opc)
5876{
5877    switch (opc) {
5878#include "insn-data.def"
5879    default:
5880        return NULL;
5881    }
5882}
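
    /*
     * Illustrative sketch (hypothetical entry, not a real opcode) of how a
     * single insn-data.def line is expanded by the three successive
     * definitions of D above:
     *
     *     D(0x1234, FOO, RR_a, Z, r1, r2, new, r1_32, add, adds32, 0)
     *
     *   1) enum DisasInsnEnum gains the enumerator insn_FOO,
     *   2) insn_info[] gains an entry with .opc = 0x1234, .name = "FOO",
     *      .help_in1 = in1_r1, .help_op = op_add, and so on,
     *   3) lookup_opc() gains "case 0x1234: return &insn_info[insn_FOO];",
     *
     * so the combined opcode (op << 8 | op2) built in extract_insn() maps
     * directly to its DisasInsn descriptor.
     */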
5883
5884#undef D
5885#undef C
5886
5887/* Extract a field from the insn.  The INSN should be left-aligned in
5888   the uint64_t so that we can more easily utilize the big-bit-endian
5889   definitions we extract from the Principles of Operation.  */
5890
5891static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5892{
5893    uint32_t r, m;
5894
5895    if (f->size == 0) {
5896        return;
5897    }
5898
5899    /* Zero extract the field from the insn.  */
5900    r = (insn << f->beg) >> (64 - f->size);
5901
5902    /* Sign-extend, or un-swap the field as necessary.  */
5903    switch (f->type) {
5904    case 0: /* unsigned */
5905        break;
5906    case 1: /* signed */
5907        assert(f->size <= 32);
5908        m = 1u << (f->size - 1);
5909        r = (r ^ m) - m;
5910        break;
5911    case 2: /* dl+dh split, signed 20 bit. */
5912        r = ((int8_t)r << 12) | (r >> 8);
5913        break;
5914    default:
5915        abort();
5916    }
5917
5918    /* Validate the "compressed" encoding we selected above, i.e. check that
5919       we haven't made two different original fields overlap.  */
5920    assert(((o->presentC >> f->indexC) & 1) == 0);
5921    o->presentC |= 1 << f->indexC;
5922    o->presentO |= 1 << f->indexO;
5923
5924    o->c[f->indexC] = r;
5925}
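
    /*
     * Worked example for the type 2 case above: the 20-bit raw field is
     * extracted with DL in its upper 12 bits and DH in its low 8 bits, and
     * the line above swaps and sign-extends them.  With DL = 0x123 and
     * DH = 0xff:
     *
     *     r = 0x123ff;
     *     r = ((int8_t)r << 12) | (r >> 8);    now r = 0xfffff123
     *
     * i.e. the long displacement is sign_extend(DH):DL, here -0xedd.
     */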
5926
5927/* Lookup the insn at the current PC, extracting the operands into O and
5928   returning the info struct for the insn.  Returns NULL for invalid insn.  */
5929
5930static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
5931                                     DisasFields *f)
5932{
5933    uint64_t insn, pc = s->pc;
5934    int op, op2, ilen;
5935    const DisasInsn *info;
5936
5937    if (unlikely(s->ex_value)) {
5938        /* Drop the EX data now, so that it's clear on exception paths.  */
5939        TCGv_i64 zero = tcg_const_i64(0);
5940        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
5941        tcg_temp_free_i64(zero);
5942
5943        /* Extract the values saved by EXECUTE.  */
5944        insn = s->ex_value & 0xffffffffffff0000ull;
5945        ilen = s->ex_value & 0xf;
5946        op = insn >> 56;
5947    } else {
5948        insn = ld_code2(env, pc);
5949        op = (insn >> 8) & 0xff;
5950        ilen = get_ilen(op);
5951        switch (ilen) {
5952        case 2:
5953            insn = insn << 48;
5954            break;
5955        case 4:
5956            insn = ld_code4(env, pc) << 32;
5957            break;
5958        case 6:
5959            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
5960            break;
5961        default:
5962            g_assert_not_reached();
5963        }
5964    }
5965    s->next_pc = s->pc + ilen;
5966    s->ilen = ilen;
5967
5968    /* We can't actually determine the insn format until we've looked up
5969       the full insn opcode, which we can't do without locating the
5970       secondary opcode.  Assume by default that OP2 is at bit 40; for
5971       those smaller insns that don't actually have a secondary opcode
5972       this will correctly result in OP2 = 0. */
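        /*
         * Concretely: for a left-aligned insn of, say, 0xb904001200000000
         * the primary opcode is 0xb9 and (insn << 8) >> 56 yields the second
         * opcode byte 0x04, while for the RIL group only a 4-bit extension
         * is encoded, so (insn << 12) >> 60 picks up bits 12-15.
         */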
5973    switch (op) {
5974    case 0x01: /* E */
5975    case 0x80: /* S */
5976    case 0x82: /* S */
5977    case 0x93: /* S */
5978    case 0xb2: /* S, RRF, RRE, IE */
5979    case 0xb3: /* RRE, RRD, RRF */
5980    case 0xb9: /* RRE, RRF */
5981    case 0xe5: /* SSE, SIL */
5982        op2 = (insn << 8) >> 56;
5983        break;
5984    case 0xa5: /* RI */
5985    case 0xa7: /* RI */
5986    case 0xc0: /* RIL */
5987    case 0xc2: /* RIL */
5988    case 0xc4: /* RIL */
5989    case 0xc6: /* RIL */
5990    case 0xc8: /* SSF */
5991    case 0xcc: /* RIL */
5992        op2 = (insn << 12) >> 60;
5993        break;
5994    case 0xc5: /* MII */
5995    case 0xc7: /* SMI */
5996    case 0xd0 ... 0xdf: /* SS */
5997    case 0xe1: /* SS */
5998    case 0xe2: /* SS */
5999    case 0xe8: /* SS */
6000    case 0xe9: /* SS */
6001    case 0xea: /* SS */
6002    case 0xee ... 0xf3: /* SS */
6003    case 0xf8 ... 0xfd: /* SS */
6004        op2 = 0;
6005        break;
6006    default:
6007        op2 = (insn << 40) >> 56;
6008        break;
6009    }
6010
6011    memset(f, 0, sizeof(*f));
6012    f->raw_insn = insn;
6013    f->op = op;
6014    f->op2 = op2;
6015
6016    /* Lookup the instruction.  */
6017    info = lookup_opc(op << 8 | op2);
6018
6019    /* If we found it, extract the operands.  */
6020    if (info != NULL) {
6021        DisasFormat fmt = info->fmt;
6022        int i;
6023
6024        for (i = 0; i < NUM_C_FIELD; ++i) {
6025            extract_field(f, &format_info[fmt].op[i], insn);
6026        }
6027    }
6028    return info;
6029}
6030
6031static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
6032{
6033    const DisasInsn *insn;
6034    ExitStatus ret = NO_EXIT;
6035    DisasFields f;
6036    DisasOps o;
6037
6038    /* Search for the insn in the table.  */
6039    insn = extract_insn(env, s, &f);
6040
6041    /* Not found means unimplemented/illegal opcode.  */
6042    if (insn == NULL) {
6043        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6044                      f.op, f.op2);
6045        gen_illegal_opcode(s);
6046        return EXIT_NORETURN;
6047    }
6048
6049#ifndef CONFIG_USER_ONLY
6050    if (s->tb->flags & FLAG_MASK_PER) {
6051        TCGv_i64 addr = tcg_const_i64(s->pc);
6052        gen_helper_per_ifetch(cpu_env, addr);
6053        tcg_temp_free_i64(addr);
6054    }
6055#endif
6056
6057    /* Check for insn specification exceptions.  */
6058    if (insn->spec) {
6059        int spec = insn->spec, excp = 0, r;
6060
6061        if (spec & SPEC_r1_even) {
6062            r = get_field(&f, r1);
6063            if (r & 1) {
6064                excp = PGM_SPECIFICATION;
6065            }
6066        }
6067        if (spec & SPEC_r2_even) {
6068            r = get_field(&f, r2);
6069            if (r & 1) {
6070                excp = PGM_SPECIFICATION;
6071            }
6072        }
6073        if (spec & SPEC_r3_even) {
6074            r = get_field(&f, r3);
6075            if (r & 1) {
6076                excp = PGM_SPECIFICATION;
6077            }
6078        }
6079        if (spec & SPEC_r1_f128) {
6080            r = get_field(&f, r1);
6081            if (r > 13) {
6082                excp = PGM_SPECIFICATION;
6083            }
6084        }
6085        if (spec & SPEC_r2_f128) {
6086            r = get_field(&f, r2);
6087            if (r > 13) {
6088                excp = PGM_SPECIFICATION;
6089            }
6090        }
6091        if (excp) {
6092            gen_program_exception(s, excp);
6093            return EXIT_NORETURN;
6094        }
6095    }
6096
6097    /* Set up the structures we use to communicate with the helpers. */
6098    s->insn = insn;
6099    s->fields = &f;
6100    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
6101    o.out = NULL;
6102    o.out2 = NULL;
6103    o.in1 = NULL;
6104    o.in2 = NULL;
6105    o.addr1 = NULL;
6106
6107    /* Implement the instruction.  */
6108    if (insn->help_in1) {
6109        insn->help_in1(s, &f, &o);
6110    }
6111    if (insn->help_in2) {
6112        insn->help_in2(s, &f, &o);
6113    }
6114    if (insn->help_prep) {
6115        insn->help_prep(s, &f, &o);
6116    }
6117    if (insn->help_op) {
6118        ret = insn->help_op(s, &o);
6119    }
6120    if (insn->help_wout) {
6121        insn->help_wout(s, &f, &o);
6122    }
6123    if (insn->help_cout) {
6124        insn->help_cout(s, &o);
6125    }
6126
6127    /* Free any temporaries created by the helpers.  */
6128    if (o.out && !o.g_out) {
6129        tcg_temp_free_i64(o.out);
6130    }
6131    if (o.out2 && !o.g_out2) {
6132        tcg_temp_free_i64(o.out2);
6133    }
6134    if (o.in1 && !o.g_in1) {
6135        tcg_temp_free_i64(o.in1);
6136    }
6137    if (o.in2 && !o.g_in2) {
6138        tcg_temp_free_i64(o.in2);
6139    }
6140    if (o.addr1) {
6141        tcg_temp_free_i64(o.addr1);
6142    }
6143
6144#ifndef CONFIG_USER_ONLY
6145    if (s->tb->flags & FLAG_MASK_PER) {
6146        /* An exception might be triggered; save the PSW if not already done.  */
6147        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
6148            tcg_gen_movi_i64(psw_addr, s->next_pc);
6149        }
6150
6151        /* Call the helper to check for a possible PER exception.  */
6152        gen_helper_per_check_exception(cpu_env);
6153    }
6154#endif
6155
6156    /* Advance to the next instruction.  */
6157    s->pc = s->next_pc;
6158    return ret;
6159}
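
/*
 * A rough sketch of the generator pipeline driven above, for a hypothetical
 * 32-bit add-register style entry whose columns name in1_r1, in2_r2,
 * prep_new, op_add, wout_r1_32 and cout_adds32 (names assumed, following
 * the pattern of the helpers in this file):
 *
 *     o.in1 = value of r1               help_in1
 *     o.in2 = value of r2               help_in2
 *     o.out = new temporary             help_prep
 *     o.out = o.in1 + o.in2             help_op (may also set the exit status)
 *     r1    = low 32 bits of o.out      help_wout
 *     cc    = add-result condition      help_cout
 *
 * The g_in*, g_out* flags mark operands that alias the global register
 * TCGvs, which is why the cleanup code above skips freeing them.
 */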
6160
6161void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
6162{
6163    CPUS390XState *env = cs->env_ptr;
6164    DisasContext dc;
6165    target_ulong pc_start;
6166    uint64_t next_page_start;
6167    int num_insns, max_insns;
6168    ExitStatus status;
6169    bool do_debug;
6170
6171    pc_start = tb->pc;
6172
6173    /* 31-bit mode */
6174    if (!(tb->flags & FLAG_MASK_64)) {
6175        pc_start &= 0x7fffffff;
6176    }
6177
6178    dc.tb = tb;
6179    dc.pc = pc_start;
6180    dc.cc_op = CC_OP_DYNAMIC;
6181    dc.ex_value = tb->cs_base;
6182    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
6183
6184    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
6185
6186    num_insns = 0;
6187    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
6188    if (max_insns == 0) {
6189        max_insns = CF_COUNT_MASK;
6190    }
6191    if (max_insns > TCG_MAX_INSNS) {
6192        max_insns = TCG_MAX_INSNS;
6193    }
6194
6195    gen_tb_start(tb);
6196
6197    do {
6198        tcg_gen_insn_start(dc.pc, dc.cc_op);
6199        num_insns++;
6200
6201        if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
6202            status = EXIT_PC_STALE;
6203            do_debug = true;
6204            /* The address covered by the breakpoint must be included in
6205               [tb->pc, tb->pc + tb->size) in order for it to be
6206               properly cleared -- thus we increment the PC here so that
6207               the logic setting tb->size below does the right thing.  */
6208            dc.pc += 2;
6209            break;
6210        }
6211
6212        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
6213            gen_io_start();
6214        }
6215
6216        status = translate_one(env, &dc);
6217
6218        /* If we reach a page boundary, are single stepping,
6219           or exhaust instruction count, stop generation.  */
6220        if (status == NO_EXIT
6221            && (dc.pc >= next_page_start
6222                || tcg_op_buf_full()
6223                || num_insns >= max_insns
6224                || singlestep
6225                || cs->singlestep_enabled
6226                || dc.ex_value)) {
6227            status = EXIT_PC_STALE;
6228        }
6229    } while (status == NO_EXIT);
6230
6231    if (tb_cflags(tb) & CF_LAST_IO) {
6232        gen_io_end();
6233    }
6234
6235    switch (status) {
6236    case EXIT_GOTO_TB:
6237    case EXIT_NORETURN:
6238        break;
6239    case EXIT_PC_STALE:
6240    case EXIT_PC_STALE_NOCHAIN:
6241        update_psw_addr(&dc);
6242        /* FALLTHRU */
6243    case EXIT_PC_UPDATED:
6244        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6245           cc op type is in env */
6246        update_cc_op(&dc);
6247        /* FALLTHRU */
6248    case EXIT_PC_CC_UPDATED:
6249        /* Exit the TB, either by raising a debug exception or by return.  */
6250        if (do_debug) {
6251            gen_exception(EXCP_DEBUG);
6252        } else if (use_exit_tb(&dc) || status == EXIT_PC_STALE_NOCHAIN) {
6253            tcg_gen_exit_tb(0);
6254        } else {
6255            tcg_gen_lookup_and_goto_ptr();
6256        }
6257        break;
6258    default:
6259        g_assert_not_reached();
6260    }
6261
6262    gen_tb_end(tb, num_insns);
6263
6264    tb->size = dc.pc - pc_start;
6265    tb->icount = num_insns;
6266
6267#if defined(S390X_DEBUG_DISAS)
6268    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
6269        && qemu_log_in_addr_range(pc_start)) {
6270        qemu_log_lock();
6271        if (unlikely(dc.ex_value)) {
6272            /* ??? Unfortunately log_target_disas can't use host memory.  */
6273            qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
6274        } else {
6275            qemu_log("IN: %s\n", lookup_symbol(pc_start));
6276            log_target_disas(cs, pc_start, dc.pc - pc_start);
6277            qemu_log("\n");
6278        }
6279        qemu_log_unlock();
6280    }
6281#endif
6282}
6283
6284void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
6285                          target_ulong *data)
6286{
6287    int cc_op = data[1];
6288    env->psw.addr = data[0];
6289    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6290        env->cc_op = cc_op;
6291    }
6292}
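
/*
 * data[0] and data[1] are the two values recorded for each insn by
 * tcg_gen_insn_start(dc.pc, dc.cc_op) in gen_intermediate_code(), so on
 * restart we put back the PSW address and, unless the cc op was dynamic or
 * static at that point, the pending cc_op.
 */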
6293