qemu/target/s390x/translate.c
   1/*
   2 *  S/390 translation
   3 *
   4 *  Copyright (c) 2009 Ulrich Hecht
   5 *  Copyright (c) 2010 Alexander Graf
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * Lesser General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21/* #define DEBUG_INLINE_BRANCHES */
  22#define S390X_DEBUG_DISAS
  23/* #define S390X_DEBUG_DISAS_VERBOSE */
  24
  25#ifdef S390X_DEBUG_DISAS_VERBOSE
  26#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
  27#else
  28#  define LOG_DISAS(...) do { } while (0)
  29#endif
  30
  31#include "qemu/osdep.h"
  32#include "cpu.h"
  33#include "internal.h"
  34#include "disas/disas.h"
  35#include "exec/exec-all.h"
  36#include "tcg-op.h"
  37#include "qemu/log.h"
  38#include "qemu/host-utils.h"
  39#include "exec/cpu_ldst.h"
  40#include "exec/gen-icount.h"
  41#include "exec/helper-proto.h"
  42#include "exec/helper-gen.h"
  43
  44#include "trace-tcg.h"
  45#include "exec/log.h"
  46
  47
  48/* Information that (most) every instruction needs to manipulate.  */
  49typedef struct DisasContext DisasContext;
  50typedef struct DisasInsn DisasInsn;
  51typedef struct DisasFields DisasFields;
  52
  53struct DisasContext {
  54    struct TranslationBlock *tb;
  55    const DisasInsn *insn;
  56    DisasFields *fields;
  57    uint64_t ex_value;
  58    uint64_t pc, next_pc;
  59    uint32_t ilen;
  60    enum cc_op cc_op;
  61    bool singlestep_enabled;
  62};
  63
  64/* Information carried about a condition to be evaluated.  */
  65typedef struct {
  66    TCGCond cond:8;
  67    bool is_64;
  68    bool g1;
  69    bool g2;
  70    union {
  71        struct { TCGv_i64 a, b; } s64;
  72        struct { TCGv_i32 a, b; } s32;
  73    } u;
  74} DisasCompare;
  75
  76/* is_jmp field values */
  77#define DISAS_EXCP DISAS_TARGET_0
  78
  79#ifdef DEBUG_INLINE_BRANCHES
  80static uint64_t inline_branch_hit[CC_OP_MAX];
  81static uint64_t inline_branch_miss[CC_OP_MAX];
  82#endif
  83
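     /* Value stored as link information by branch-and-link style insns:
        in 31-bit mode the addressing-mode bit (bit 32) is set in the
        result; the extra fields a 24-bit-mode link would carry in the
        high byte are not filled in by this function.  */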
  84static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
  85{
  86    if (!(s->tb->flags & FLAG_MASK_64)) {
  87        if (s->tb->flags & FLAG_MASK_32) {
  88            return pc | 0x80000000;
  89        }
  90    }
  91    return pc;
  92}
  93
  94static TCGv_i64 psw_addr;
  95static TCGv_i64 psw_mask;
  96static TCGv_i64 gbea;
  97
  98static TCGv_i32 cc_op;
  99static TCGv_i64 cc_src;
 100static TCGv_i64 cc_dst;
 101static TCGv_i64 cc_vr;
 102
 103static char cpu_reg_names[32][4];
 104static TCGv_i64 regs[16];
 105static TCGv_i64 fregs[16];
 106
 107void s390x_translate_init(void)
 108{
 109    int i;
 110
 111    psw_addr = tcg_global_mem_new_i64(cpu_env,
 112                                      offsetof(CPUS390XState, psw.addr),
 113                                      "psw_addr");
 114    psw_mask = tcg_global_mem_new_i64(cpu_env,
 115                                      offsetof(CPUS390XState, psw.mask),
 116                                      "psw_mask");
 117    gbea = tcg_global_mem_new_i64(cpu_env,
 118                                  offsetof(CPUS390XState, gbea),
 119                                  "gbea");
 120
 121    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
 122                                   "cc_op");
 123    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
 124                                    "cc_src");
 125    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
 126                                    "cc_dst");
 127    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
 128                                   "cc_vr");
 129
 130    for (i = 0; i < 16; i++) {
 131        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
 132        regs[i] = tcg_global_mem_new(cpu_env,
 133                                     offsetof(CPUS390XState, regs[i]),
 134                                     cpu_reg_names[i]);
 135    }
 136
 137    for (i = 0; i < 16; i++) {
 138        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
 139        fregs[i] = tcg_global_mem_new(cpu_env,
 140                                      offsetof(CPUS390XState, vregs[i][0].d),
 141                                      cpu_reg_names[i + 16]);
 142    }
 143}
 144
 145static TCGv_i64 load_reg(int reg)
 146{
 147    TCGv_i64 r = tcg_temp_new_i64();
 148    tcg_gen_mov_i64(r, regs[reg]);
 149    return r;
 150}
 151
 152static TCGv_i64 load_freg32_i64(int reg)
 153{
 154    TCGv_i64 r = tcg_temp_new_i64();
 155    tcg_gen_shri_i64(r, fregs[reg], 32);
 156    return r;
 157}
 158
 159static void store_reg(int reg, TCGv_i64 v)
 160{
 161    tcg_gen_mov_i64(regs[reg], v);
 162}
 163
 164static void store_freg(int reg, TCGv_i64 v)
 165{
 166    tcg_gen_mov_i64(fregs[reg], v);
 167}
 168
 169static void store_reg32_i64(int reg, TCGv_i64 v)
 170{
 171    /* 32 bit register writes keep the upper half */
 172    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
 173}
 174
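     /* As above, but for the high 32 bits; the low half is preserved.  */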
 175static void store_reg32h_i64(int reg, TCGv_i64 v)
 176{
 177    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
 178}
 179
 180static void store_freg32_i64(int reg, TCGv_i64 v)
 181{
 182    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
 183}
 184
 185static void return_low128(TCGv_i64 dest)
 186{
 187    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
 188}
 189
 190static void update_psw_addr(DisasContext *s)
 191{
 192    /* psw.addr */
 193    tcg_gen_movi_i64(psw_addr, s->pc);
 194}
 195
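     /* PER (Program Event Recording) support: gbea tracks the guest
        breaking-event address, and when PER is enabled in the guest PSW
        the helper is called so a PER branch event can be raised when due.  */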
 196static void per_branch(DisasContext *s, bool to_next)
 197{
 198#ifndef CONFIG_USER_ONLY
 199    tcg_gen_movi_i64(gbea, s->pc);
 200
 201    if (s->tb->flags & FLAG_MASK_PER) {
 202        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
 203        gen_helper_per_branch(cpu_env, gbea, next_pc);
 204        if (to_next) {
 205            tcg_temp_free_i64(next_pc);
 206        }
 207    }
 208#endif
 209}
 210
 211static void per_branch_cond(DisasContext *s, TCGCond cond,
 212                            TCGv_i64 arg1, TCGv_i64 arg2)
 213{
 214#ifndef CONFIG_USER_ONLY
 215    if (s->tb->flags & FLAG_MASK_PER) {
 216        TCGLabel *lab = gen_new_label();
 217        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
 218
 219        tcg_gen_movi_i64(gbea, s->pc);
 220        gen_helper_per_branch(cpu_env, gbea, psw_addr);
 221
 222        gen_set_label(lab);
 223    } else {
 224        TCGv_i64 pc = tcg_const_i64(s->pc);
 225        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
 226        tcg_temp_free_i64(pc);
 227    }
 228#endif
 229}
 230
 231static void per_breaking_event(DisasContext *s)
 232{
 233    tcg_gen_movi_i64(gbea, s->pc);
 234}
 235
 236static void update_cc_op(DisasContext *s)
 237{
 238    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
 239        tcg_gen_movi_i32(cc_op, s->cc_op);
 240    }
 241}
 242
 243static void potential_page_fault(DisasContext *s)
 244{
 245    update_psw_addr(s);
 246    update_cc_op(s);
 247}
 248
 249static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
 250{
 251    return (uint64_t)cpu_lduw_code(env, pc);
 252}
 253
 254static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
 255{
 256    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
 257}
 258
 259static int get_mem_index(DisasContext *s)
 260{
 261    switch (s->tb->flags & FLAG_MASK_ASC) {
 262    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
 263        return 0;
 264    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
 265        return 1;
 266    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
 267        return 2;
 268    default:
 269        tcg_abort();
 270        break;
 271    }
 272}
 273
 274static void gen_exception(int excp)
 275{
 276    TCGv_i32 tmp = tcg_const_i32(excp);
 277    gen_helper_exception(cpu_env, tmp);
 278    tcg_temp_free_i32(tmp);
 279}
 280
 281static void gen_program_exception(DisasContext *s, int code)
 282{
 283    TCGv_i32 tmp;
 284
  285    /* Remember what pgm exception this was.  */
 286    tmp = tcg_const_i32(code);
 287    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
 288    tcg_temp_free_i32(tmp);
 289
 290    tmp = tcg_const_i32(s->ilen);
 291    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
 292    tcg_temp_free_i32(tmp);
 293
 294    /* update the psw */
 295    update_psw_addr(s);
 296
 297    /* Save off cc.  */
 298    update_cc_op(s);
 299
 300    /* Trigger exception.  */
 301    gen_exception(EXCP_PGM);
 302}
 303
 304static inline void gen_illegal_opcode(DisasContext *s)
 305{
 306    gen_program_exception(s, PGM_OPERATION);
 307}
 308
 309static inline void gen_trap(DisasContext *s)
 310{
 311    TCGv_i32 t;
 312
 313    /* Set DXC to 0xff.  */
 314    t = tcg_temp_new_i32();
 315    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
 316    tcg_gen_ori_i32(t, t, 0xff00);
 317    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
 318    tcg_temp_free_i32(t);
 319
 320    gen_program_exception(s, PGM_DATA);
 321}
 322
 323#ifndef CONFIG_USER_ONLY
 324static void check_privileged(DisasContext *s)
 325{
 326    if (s->tb->flags & FLAG_MASK_PSTATE) {
 327        gen_program_exception(s, PGM_PRIVILEGED);
 328    }
 329}
 330#endif
 331
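     /* Form an effective address from base, index and displacement;
        outside 64-bit addressing mode the result wraps to 31 bits.  */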
 332static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
 333{
 334    TCGv_i64 tmp = tcg_temp_new_i64();
 335    bool need_31 = !(s->tb->flags & FLAG_MASK_64);
 336
 337    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
  338       displacements early we create larger immediate addends.  */
 339
 340    /* Note that addi optimizes the imm==0 case.  */
 341    if (b2 && x2) {
 342        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
 343        tcg_gen_addi_i64(tmp, tmp, d2);
 344    } else if (b2) {
 345        tcg_gen_addi_i64(tmp, regs[b2], d2);
 346    } else if (x2) {
 347        tcg_gen_addi_i64(tmp, regs[x2], d2);
 348    } else {
 349        if (need_31) {
 350            d2 &= 0x7fffffff;
 351            need_31 = false;
 352        }
 353        tcg_gen_movi_i64(tmp, d2);
 354    }
 355    if (need_31) {
 356        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
 357    }
 358
 359    return tmp;
 360}
 361
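     /* True if cc_src/cc_dst/cc_vr currently hold data that a later cc
        computation could still consume, i.e. the cc op is neither dynamic,
        static, nor a constant cc value.  */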
 362static inline bool live_cc_data(DisasContext *s)
 363{
 364    return (s->cc_op != CC_OP_DYNAMIC
 365            && s->cc_op != CC_OP_STATIC
 366            && s->cc_op > 3);
 367}
 368
 369static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
 370{
 371    if (live_cc_data(s)) {
 372        tcg_gen_discard_i64(cc_src);
 373        tcg_gen_discard_i64(cc_dst);
 374        tcg_gen_discard_i64(cc_vr);
 375    }
 376    s->cc_op = CC_OP_CONST0 + val;
 377}
 378
 379static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
 380{
 381    if (live_cc_data(s)) {
 382        tcg_gen_discard_i64(cc_src);
 383        tcg_gen_discard_i64(cc_vr);
 384    }
 385    tcg_gen_mov_i64(cc_dst, dst);
 386    s->cc_op = op;
 387}
 388
 389static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
 390                                  TCGv_i64 dst)
 391{
 392    if (live_cc_data(s)) {
 393        tcg_gen_discard_i64(cc_vr);
 394    }
 395    tcg_gen_mov_i64(cc_src, src);
 396    tcg_gen_mov_i64(cc_dst, dst);
 397    s->cc_op = op;
 398}
 399
 400static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
 401                                  TCGv_i64 dst, TCGv_i64 vr)
 402{
 403    tcg_gen_mov_i64(cc_src, src);
 404    tcg_gen_mov_i64(cc_dst, dst);
 405    tcg_gen_mov_i64(cc_vr, vr);
 406    s->cc_op = op;
 407}
 408
 409static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
 410{
 411    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
 412}
 413
 414static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
 415{
 416    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
 417}
 418
 419static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
 420{
 421    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
 422}
 423
 424static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
 425{
 426    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
 427}
 428
 429/* CC value is in env->cc_op */
 430static void set_cc_static(DisasContext *s)
 431{
 432    if (live_cc_data(s)) {
 433        tcg_gen_discard_i64(cc_src);
 434        tcg_gen_discard_i64(cc_dst);
 435        tcg_gen_discard_i64(cc_vr);
 436    }
 437    s->cc_op = CC_OP_STATIC;
 438}
 439
 440/* calculates cc into cc_op */
 441static void gen_op_calc_cc(DisasContext *s)
 442{
 443    TCGv_i32 local_cc_op;
 444    TCGv_i64 dummy;
 445
 446    TCGV_UNUSED_I32(local_cc_op);
 447    TCGV_UNUSED_I64(dummy);
 448    switch (s->cc_op) {
 449    default:
 450        dummy = tcg_const_i64(0);
 451        /* FALLTHRU */
 452    case CC_OP_ADD_64:
 453    case CC_OP_ADDU_64:
 454    case CC_OP_ADDC_64:
 455    case CC_OP_SUB_64:
 456    case CC_OP_SUBU_64:
 457    case CC_OP_SUBB_64:
 458    case CC_OP_ADD_32:
 459    case CC_OP_ADDU_32:
 460    case CC_OP_ADDC_32:
 461    case CC_OP_SUB_32:
 462    case CC_OP_SUBU_32:
 463    case CC_OP_SUBB_32:
 464        local_cc_op = tcg_const_i32(s->cc_op);
 465        break;
 466    case CC_OP_CONST0:
 467    case CC_OP_CONST1:
 468    case CC_OP_CONST2:
 469    case CC_OP_CONST3:
 470    case CC_OP_STATIC:
 471    case CC_OP_DYNAMIC:
 472        break;
 473    }
 474
 475    switch (s->cc_op) {
 476    case CC_OP_CONST0:
 477    case CC_OP_CONST1:
 478    case CC_OP_CONST2:
 479    case CC_OP_CONST3:
 480        /* s->cc_op is the cc value */
 481        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
 482        break;
 483    case CC_OP_STATIC:
 484        /* env->cc_op already is the cc value */
 485        break;
 486    case CC_OP_NZ:
 487    case CC_OP_ABS_64:
 488    case CC_OP_NABS_64:
 489    case CC_OP_ABS_32:
 490    case CC_OP_NABS_32:
 491    case CC_OP_LTGT0_32:
 492    case CC_OP_LTGT0_64:
 493    case CC_OP_COMP_32:
 494    case CC_OP_COMP_64:
 495    case CC_OP_NZ_F32:
 496    case CC_OP_NZ_F64:
 497    case CC_OP_FLOGR:
 498        /* 1 argument */
 499        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
 500        break;
 501    case CC_OP_ICM:
 502    case CC_OP_LTGT_32:
 503    case CC_OP_LTGT_64:
 504    case CC_OP_LTUGTU_32:
 505    case CC_OP_LTUGTU_64:
 506    case CC_OP_TM_32:
 507    case CC_OP_TM_64:
 508    case CC_OP_SLA_32:
 509    case CC_OP_SLA_64:
 510    case CC_OP_NZ_F128:
 511        /* 2 arguments */
 512        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
 513        break;
 514    case CC_OP_ADD_64:
 515    case CC_OP_ADDU_64:
 516    case CC_OP_ADDC_64:
 517    case CC_OP_SUB_64:
 518    case CC_OP_SUBU_64:
 519    case CC_OP_SUBB_64:
 520    case CC_OP_ADD_32:
 521    case CC_OP_ADDU_32:
 522    case CC_OP_ADDC_32:
 523    case CC_OP_SUB_32:
 524    case CC_OP_SUBU_32:
 525    case CC_OP_SUBB_32:
 526        /* 3 arguments */
 527        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
 528        break;
 529    case CC_OP_DYNAMIC:
 530        /* unknown operation - assume 3 arguments and cc_op in env */
 531        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
 532        break;
 533    default:
 534        tcg_abort();
 535    }
 536
 537    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
 538        tcg_temp_free_i32(local_cc_op);
 539    }
 540    if (!TCGV_IS_UNUSED_I64(dummy)) {
 541        tcg_temp_free_i64(dummy);
 542    }
 543
 544    /* We now have cc in cc_op as constant */
 545    set_cc_static(s);
 546}
 547
 548static bool use_exit_tb(DisasContext *s)
 549{
 550    return (s->singlestep_enabled ||
 551            (tb_cflags(s->tb) & CF_LAST_IO) ||
 552            (s->tb->flags & FLAG_MASK_PER));
 553}
 554
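     /* Direct TB chaining is only used when nothing forces an exit to the
        main loop (single-stepping, CF_LAST_IO, PER) and, for softmmu, the
        destination lies on the same guest page as either the start of the
        TB or the current instruction.  */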
 555static bool use_goto_tb(DisasContext *s, uint64_t dest)
 556{
 557    if (unlikely(use_exit_tb(s))) {
 558        return false;
 559    }
 560#ifndef CONFIG_USER_ONLY
 561    return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
 562           (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
 563#else
 564    return true;
 565#endif
 566}
 567
 568static void account_noninline_branch(DisasContext *s, int cc_op)
 569{
 570#ifdef DEBUG_INLINE_BRANCHES
 571    inline_branch_miss[cc_op]++;
 572#endif
 573}
 574
 575static void account_inline_branch(DisasContext *s, int cc_op)
 576{
 577#ifdef DEBUG_INLINE_BRANCHES
 578    inline_branch_hit[cc_op]++;
 579#endif
 580}
 581
  582/* Table of mask values to comparison codes, given a comparison as input.
  583   The mask bits select CC 0-3, MSB first; CC=3 should not be possible.  */
 584static const TCGCond ltgt_cond[16] = {
 585    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
 586    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
 587    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
 588    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
 589    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
 590    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
 591    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
 592    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
 593};
 594
  595/* Table of mask values to comparison codes, given a logic op as input.
  596   For a logical result, only CC=0 and CC=1 should be possible.  */
 597static const TCGCond nz_cond[16] = {
 598    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
 599    TCG_COND_NEVER, TCG_COND_NEVER,
 600    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
 601    TCG_COND_NE, TCG_COND_NE,
 602    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
 603    TCG_COND_EQ, TCG_COND_EQ,
 604    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
 605    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
 606};
 607
 608/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
 609   details required to generate a TCG comparison.  */
 610static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
 611{
 612    TCGCond cond;
 613    enum cc_op old_cc_op = s->cc_op;
 614
 615    if (mask == 15 || mask == 0) {
 616        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
 617        c->u.s32.a = cc_op;
 618        c->u.s32.b = cc_op;
 619        c->g1 = c->g2 = true;
 620        c->is_64 = false;
 621        return;
 622    }
 623
 624    /* Find the TCG condition for the mask + cc op.  */
 625    switch (old_cc_op) {
 626    case CC_OP_LTGT0_32:
 627    case CC_OP_LTGT0_64:
 628    case CC_OP_LTGT_32:
 629    case CC_OP_LTGT_64:
 630        cond = ltgt_cond[mask];
 631        if (cond == TCG_COND_NEVER) {
 632            goto do_dynamic;
 633        }
 634        account_inline_branch(s, old_cc_op);
 635        break;
 636
 637    case CC_OP_LTUGTU_32:
 638    case CC_OP_LTUGTU_64:
 639        cond = tcg_unsigned_cond(ltgt_cond[mask]);
 640        if (cond == TCG_COND_NEVER) {
 641            goto do_dynamic;
 642        }
 643        account_inline_branch(s, old_cc_op);
 644        break;
 645
 646    case CC_OP_NZ:
 647        cond = nz_cond[mask];
 648        if (cond == TCG_COND_NEVER) {
 649            goto do_dynamic;
 650        }
 651        account_inline_branch(s, old_cc_op);
 652        break;
 653
 654    case CC_OP_TM_32:
 655    case CC_OP_TM_64:
 656        switch (mask) {
 657        case 8:
 658            cond = TCG_COND_EQ;
 659            break;
 660        case 4 | 2 | 1:
 661            cond = TCG_COND_NE;
 662            break;
 663        default:
 664            goto do_dynamic;
 665        }
 666        account_inline_branch(s, old_cc_op);
 667        break;
 668
 669    case CC_OP_ICM:
 670        switch (mask) {
 671        case 8:
 672            cond = TCG_COND_EQ;
 673            break;
 674        case 4 | 2 | 1:
 675        case 4 | 2:
 676            cond = TCG_COND_NE;
 677            break;
 678        default:
 679            goto do_dynamic;
 680        }
 681        account_inline_branch(s, old_cc_op);
 682        break;
 683
 684    case CC_OP_FLOGR:
 685        switch (mask & 0xa) {
 686        case 8: /* src == 0 -> no one bit found */
 687            cond = TCG_COND_EQ;
 688            break;
 689        case 2: /* src != 0 -> one bit found */
 690            cond = TCG_COND_NE;
 691            break;
 692        default:
 693            goto do_dynamic;
 694        }
 695        account_inline_branch(s, old_cc_op);
 696        break;
 697
 698    case CC_OP_ADDU_32:
 699    case CC_OP_ADDU_64:
 700        switch (mask) {
 701        case 8 | 2: /* vr == 0 */
 702            cond = TCG_COND_EQ;
 703            break;
 704        case 4 | 1: /* vr != 0 */
 705            cond = TCG_COND_NE;
 706            break;
 707        case 8 | 4: /* no carry -> vr >= src */
 708            cond = TCG_COND_GEU;
 709            break;
 710        case 2 | 1: /* carry -> vr < src */
 711            cond = TCG_COND_LTU;
 712            break;
 713        default:
 714            goto do_dynamic;
 715        }
 716        account_inline_branch(s, old_cc_op);
 717        break;
 718
 719    case CC_OP_SUBU_32:
 720    case CC_OP_SUBU_64:
  721        /* Note that CC=0 is impossible; treat it as don't-care.  */
 722        switch (mask & 7) {
 723        case 2: /* zero -> op1 == op2 */
 724            cond = TCG_COND_EQ;
 725            break;
 726        case 4 | 1: /* !zero -> op1 != op2 */
 727            cond = TCG_COND_NE;
 728            break;
 729        case 4: /* borrow (!carry) -> op1 < op2 */
 730            cond = TCG_COND_LTU;
 731            break;
 732        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
 733            cond = TCG_COND_GEU;
 734            break;
 735        default:
 736            goto do_dynamic;
 737        }
 738        account_inline_branch(s, old_cc_op);
 739        break;
 740
 741    default:
 742    do_dynamic:
 743        /* Calculate cc value.  */
 744        gen_op_calc_cc(s);
 745        /* FALLTHRU */
 746
 747    case CC_OP_STATIC:
 748        /* Jump based on CC.  We'll load up the real cond below;
 749           the assignment here merely avoids a compiler warning.  */
 750        account_noninline_branch(s, old_cc_op);
 751        old_cc_op = CC_OP_STATIC;
 752        cond = TCG_COND_NEVER;
 753        break;
 754    }
 755
 756    /* Load up the arguments of the comparison.  */
 757    c->is_64 = true;
 758    c->g1 = c->g2 = false;
 759    switch (old_cc_op) {
 760    case CC_OP_LTGT0_32:
 761        c->is_64 = false;
 762        c->u.s32.a = tcg_temp_new_i32();
 763        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
 764        c->u.s32.b = tcg_const_i32(0);
 765        break;
 766    case CC_OP_LTGT_32:
 767    case CC_OP_LTUGTU_32:
 768    case CC_OP_SUBU_32:
 769        c->is_64 = false;
 770        c->u.s32.a = tcg_temp_new_i32();
 771        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
 772        c->u.s32.b = tcg_temp_new_i32();
 773        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
 774        break;
 775
 776    case CC_OP_LTGT0_64:
 777    case CC_OP_NZ:
 778    case CC_OP_FLOGR:
 779        c->u.s64.a = cc_dst;
 780        c->u.s64.b = tcg_const_i64(0);
 781        c->g1 = true;
 782        break;
 783    case CC_OP_LTGT_64:
 784    case CC_OP_LTUGTU_64:
 785    case CC_OP_SUBU_64:
 786        c->u.s64.a = cc_src;
 787        c->u.s64.b = cc_dst;
 788        c->g1 = c->g2 = true;
 789        break;
 790
 791    case CC_OP_TM_32:
 792    case CC_OP_TM_64:
 793    case CC_OP_ICM:
 794        c->u.s64.a = tcg_temp_new_i64();
 795        c->u.s64.b = tcg_const_i64(0);
 796        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
 797        break;
 798
 799    case CC_OP_ADDU_32:
 800        c->is_64 = false;
 801        c->u.s32.a = tcg_temp_new_i32();
 802        c->u.s32.b = tcg_temp_new_i32();
 803        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
 804        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
 805            tcg_gen_movi_i32(c->u.s32.b, 0);
 806        } else {
 807            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
 808        }
 809        break;
 810
 811    case CC_OP_ADDU_64:
 812        c->u.s64.a = cc_vr;
 813        c->g1 = true;
 814        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
 815            c->u.s64.b = tcg_const_i64(0);
 816        } else {
 817            c->u.s64.b = cc_src;
 818            c->g2 = true;
 819        }
 820        break;
 821
 822    case CC_OP_STATIC:
 823        c->is_64 = false;
 824        c->u.s32.a = cc_op;
 825        c->g1 = true;
 826        switch (mask) {
 827        case 0x8 | 0x4 | 0x2: /* cc != 3 */
 828            cond = TCG_COND_NE;
 829            c->u.s32.b = tcg_const_i32(3);
 830            break;
 831        case 0x8 | 0x4 | 0x1: /* cc != 2 */
 832            cond = TCG_COND_NE;
 833            c->u.s32.b = tcg_const_i32(2);
 834            break;
 835        case 0x8 | 0x2 | 0x1: /* cc != 1 */
 836            cond = TCG_COND_NE;
 837            c->u.s32.b = tcg_const_i32(1);
 838            break;
 839        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
 840            cond = TCG_COND_EQ;
 841            c->g1 = false;
 842            c->u.s32.a = tcg_temp_new_i32();
 843            c->u.s32.b = tcg_const_i32(0);
 844            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
 845            break;
 846        case 0x8 | 0x4: /* cc < 2 */
 847            cond = TCG_COND_LTU;
 848            c->u.s32.b = tcg_const_i32(2);
 849            break;
 850        case 0x8: /* cc == 0 */
 851            cond = TCG_COND_EQ;
 852            c->u.s32.b = tcg_const_i32(0);
 853            break;
 854        case 0x4 | 0x2 | 0x1: /* cc != 0 */
 855            cond = TCG_COND_NE;
 856            c->u.s32.b = tcg_const_i32(0);
 857            break;
 858        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
 859            cond = TCG_COND_NE;
 860            c->g1 = false;
 861            c->u.s32.a = tcg_temp_new_i32();
 862            c->u.s32.b = tcg_const_i32(0);
 863            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
 864            break;
 865        case 0x4: /* cc == 1 */
 866            cond = TCG_COND_EQ;
 867            c->u.s32.b = tcg_const_i32(1);
 868            break;
 869        case 0x2 | 0x1: /* cc > 1 */
 870            cond = TCG_COND_GTU;
 871            c->u.s32.b = tcg_const_i32(1);
 872            break;
 873        case 0x2: /* cc == 2 */
 874            cond = TCG_COND_EQ;
 875            c->u.s32.b = tcg_const_i32(2);
 876            break;
 877        case 0x1: /* cc == 3 */
 878            cond = TCG_COND_EQ;
 879            c->u.s32.b = tcg_const_i32(3);
 880            break;
 881        default:
 882            /* CC is masked by something else: (8 >> cc) & mask.  */
 883            cond = TCG_COND_NE;
 884            c->g1 = false;
 885            c->u.s32.a = tcg_const_i32(8);
 886            c->u.s32.b = tcg_const_i32(0);
 887            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
 888            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
 889            break;
 890        }
 891        break;
 892
 893    default:
 894        abort();
 895    }
 896    c->cond = cond;
 897}
 898
 899static void free_compare(DisasCompare *c)
 900{
 901    if (!c->g1) {
 902        if (c->is_64) {
 903            tcg_temp_free_i64(c->u.s64.a);
 904        } else {
 905            tcg_temp_free_i32(c->u.s32.a);
 906        }
 907    }
 908    if (!c->g2) {
 909        if (c->is_64) {
 910            tcg_temp_free_i64(c->u.s64.b);
 911        } else {
 912            tcg_temp_free_i32(c->u.s32.b);
 913        }
 914    }
 915}
 916
 917/* ====================================================================== */
 918/* Define the insn format enumeration.  */
 919#define F0(N)                         FMT_##N,
 920#define F1(N, X1)                     F0(N)
 921#define F2(N, X1, X2)                 F0(N)
 922#define F3(N, X1, X2, X3)             F0(N)
 923#define F4(N, X1, X2, X3, X4)         F0(N)
 924#define F5(N, X1, X2, X3, X4, X5)     F0(N)
 925
 926typedef enum {
 927#include "insn-format.def"
 928} DisasFormat;
 929
 930#undef F0
 931#undef F1
 932#undef F2
 933#undef F3
 934#undef F4
 935#undef F5
 936
 937/* Define a structure to hold the decoded fields.  We'll store each inside
 938   an array indexed by an enum.  In order to conserve memory, we'll arrange
 939   for fields that do not exist at the same time to overlap, thus the "C"
 940   for compact.  For checking purposes there is an "O" for original index
 941   as well that will be applied to availability bitmaps.  */
 942
 943enum DisasFieldIndexO {
 944    FLD_O_r1,
 945    FLD_O_r2,
 946    FLD_O_r3,
 947    FLD_O_m1,
 948    FLD_O_m3,
 949    FLD_O_m4,
 950    FLD_O_b1,
 951    FLD_O_b2,
 952    FLD_O_b4,
 953    FLD_O_d1,
 954    FLD_O_d2,
 955    FLD_O_d4,
 956    FLD_O_x2,
 957    FLD_O_l1,
 958    FLD_O_l2,
 959    FLD_O_i1,
 960    FLD_O_i2,
 961    FLD_O_i3,
 962    FLD_O_i4,
 963    FLD_O_i5
 964};
 965
 966enum DisasFieldIndexC {
 967    FLD_C_r1 = 0,
 968    FLD_C_m1 = 0,
 969    FLD_C_b1 = 0,
 970    FLD_C_i1 = 0,
 971
 972    FLD_C_r2 = 1,
 973    FLD_C_b2 = 1,
 974    FLD_C_i2 = 1,
 975
 976    FLD_C_r3 = 2,
 977    FLD_C_m3 = 2,
 978    FLD_C_i3 = 2,
 979
 980    FLD_C_m4 = 3,
 981    FLD_C_b4 = 3,
 982    FLD_C_i4 = 3,
 983    FLD_C_l1 = 3,
 984
 985    FLD_C_i5 = 4,
 986    FLD_C_d1 = 4,
 987
 988    FLD_C_d2 = 5,
 989
 990    FLD_C_d4 = 6,
 991    FLD_C_x2 = 6,
 992    FLD_C_l2 = 6,
 993
 994    NUM_C_FIELD = 7
 995};
 996
 997struct DisasFields {
 998    uint64_t raw_insn;
 999    unsigned op:8;
1000    unsigned op2:8;
1001    unsigned presentC:16;
1002    unsigned int presentO;
1003    int c[NUM_C_FIELD];
1004};
1005
1006/* This is the way fields are to be accessed out of DisasFields.  */
1007#define have_field(S, F)  have_field1((S), FLD_O_##F)
1008#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
1009
1010static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1011{
1012    return (f->presentO >> c) & 1;
1013}
1014
1015static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1016                      enum DisasFieldIndexC c)
1017{
1018    assert(have_field1(f, o));
1019    return f->c[c];
1020}
1021
1022/* Describe the layout of each field in each format.  */
1023typedef struct DisasField {
1024    unsigned int beg:8;
1025    unsigned int size:8;
1026    unsigned int type:2;
1027    unsigned int indexC:6;
1028    enum DisasFieldIndexO indexO:8;
1029} DisasField;
1030
1031typedef struct DisasFormatInfo {
1032    DisasField op[NUM_C_FIELD];
1033} DisasFormatInfo;
1034
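     /* Each field descriptor below is { bit offset, bit size, type,
        compact index, original index }.  Judging from the macros that
        follow, type 0 marks a plain unsigned field, 1 a signed immediate,
        and 2 the split 20-bit displacement.  */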
1035#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
1036#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
1037#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1038                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1039#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1040                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
1041                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1042#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1043                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1044#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1045                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
1046                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1047#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
1048#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
1049
1050#define F0(N)                     { { } },
1051#define F1(N, X1)                 { { X1 } },
1052#define F2(N, X1, X2)             { { X1, X2 } },
1053#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
1054#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
1055#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1056
1057static const DisasFormatInfo format_info[] = {
1058#include "insn-format.def"
1059};
1060
1061#undef F0
1062#undef F1
1063#undef F2
1064#undef F3
1065#undef F4
1066#undef F5
1067#undef R
1068#undef M
1069#undef BD
1070#undef BXD
1071#undef BDL
1072#undef BXDL
1073#undef I
1074#undef L
1075
 1076/* Generally, we'll extract operands into these structures, operate upon
1077   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
1078   of routines below for more details.  */
1079typedef struct {
1080    bool g_out, g_out2, g_in1, g_in2;
1081    TCGv_i64 out, out2, in1, in2;
1082    TCGv_i64 addr1;
1083} DisasOps;
1084
1085/* Instructions can place constraints on their operands, raising specification
1086   exceptions if they are violated.  To make this easy to automate, each "in1",
1087   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1088   of the following, or 0.  To make this easy to document, we'll put the
1089   SPEC_<name> defines next to <name>.  */
1090
1091#define SPEC_r1_even    1
1092#define SPEC_r2_even    2
1093#define SPEC_r3_even    4
1094#define SPEC_r1_f128    8
1095#define SPEC_r2_f128    16
1096
1097/* Return values from translate_one, indicating the state of the TB.  */
1098typedef enum {
1099    /* Continue the TB.  */
1100    NO_EXIT,
1101    /* We have emitted one or more goto_tb.  No fixup required.  */
1102    EXIT_GOTO_TB,
1103    /* We are not using a goto_tb (for whatever reason), but have updated
1104       the PC (for whatever reason), so there's no need to do it again on
1105       exiting the TB.  */
1106    EXIT_PC_UPDATED,
1107    /* We have updated the PC and CC values.  */
1108    EXIT_PC_CC_UPDATED,
1109    /* We are exiting the TB, but have neither emitted a goto_tb, nor
1110       updated the PC for the next instruction to be executed.  */
1111    EXIT_PC_STALE,
1112    /* We are exiting the TB to the main loop.  */
1113    EXIT_PC_STALE_NOCHAIN,
1114    /* We are ending the TB with a noreturn function call, e.g. longjmp.
1115       No following code will be executed.  */
1116    EXIT_NORETURN,
1117} ExitStatus;
1118
1119struct DisasInsn {
1120    unsigned opc:16;
1121    DisasFormat fmt:8;
1122    unsigned fac:8;
1123    unsigned spec:8;
1124
1125    const char *name;
1126
1127    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1128    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1129    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1130    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1131    void (*help_cout)(DisasContext *, DisasOps *);
1132    ExitStatus (*help_op)(DisasContext *, DisasOps *);
1133
1134    uint64_t data;
1135};
1136
1137/* ====================================================================== */
1138/* Miscellaneous helpers, used by several operations.  */
1139
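     /* Load a shift-count style operand into in2: the displacement alone
        when no base register is given, otherwise base plus displacement;
        either way masked to the valid count bits.  */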
1140static void help_l2_shift(DisasContext *s, DisasFields *f,
1141                          DisasOps *o, int mask)
1142{
1143    int b2 = get_field(f, b2);
1144    int d2 = get_field(f, d2);
1145
1146    if (b2 == 0) {
1147        o->in2 = tcg_const_i64(d2 & mask);
1148    } else {
1149        o->in2 = get_address(s, 0, b2, d2);
1150        tcg_gen_andi_i64(o->in2, o->in2, mask);
1151    }
1152}
1153
1154static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1155{
1156    if (dest == s->next_pc) {
1157        per_branch(s, true);
1158        return NO_EXIT;
1159    }
1160    if (use_goto_tb(s, dest)) {
1161        update_cc_op(s);
1162        per_breaking_event(s);
1163        tcg_gen_goto_tb(0);
1164        tcg_gen_movi_i64(psw_addr, dest);
1165        tcg_gen_exit_tb((uintptr_t)s->tb);
1166        return EXIT_GOTO_TB;
1167    } else {
1168        tcg_gen_movi_i64(psw_addr, dest);
1169        per_branch(s, false);
1170        return EXIT_PC_UPDATED;
1171    }
1172}
1173
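     /* Emit a conditional branch to either an immediate target or a computed
        destination (cdest).  Depending on what use_goto_tb allows, this uses
        goto_tb chaining for both edges, for the fallthrough edge only, or
        falls back to selecting the new psw_addr with a movcond.  */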
1174static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1175                              bool is_imm, int imm, TCGv_i64 cdest)
1176{
1177    ExitStatus ret;
1178    uint64_t dest = s->pc + 2 * imm;
1179    TCGLabel *lab;
1180
1181    /* Take care of the special cases first.  */
1182    if (c->cond == TCG_COND_NEVER) {
1183        ret = NO_EXIT;
1184        goto egress;
1185    }
1186    if (is_imm) {
1187        if (dest == s->next_pc) {
1188            /* Branch to next.  */
1189            per_branch(s, true);
1190            ret = NO_EXIT;
1191            goto egress;
1192        }
1193        if (c->cond == TCG_COND_ALWAYS) {
1194            ret = help_goto_direct(s, dest);
1195            goto egress;
1196        }
1197    } else {
1198        if (TCGV_IS_UNUSED_I64(cdest)) {
1199            /* E.g. bcr %r0 -> no branch.  */
1200            ret = NO_EXIT;
1201            goto egress;
1202        }
1203        if (c->cond == TCG_COND_ALWAYS) {
1204            tcg_gen_mov_i64(psw_addr, cdest);
1205            per_branch(s, false);
1206            ret = EXIT_PC_UPDATED;
1207            goto egress;
1208        }
1209    }
1210
1211    if (use_goto_tb(s, s->next_pc)) {
1212        if (is_imm && use_goto_tb(s, dest)) {
1213            /* Both exits can use goto_tb.  */
1214            update_cc_op(s);
1215
1216            lab = gen_new_label();
1217            if (c->is_64) {
1218                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1219            } else {
1220                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1221            }
1222
1223            /* Branch not taken.  */
1224            tcg_gen_goto_tb(0);
1225            tcg_gen_movi_i64(psw_addr, s->next_pc);
1226            tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1227
1228            /* Branch taken.  */
1229            gen_set_label(lab);
1230            per_breaking_event(s);
1231            tcg_gen_goto_tb(1);
1232            tcg_gen_movi_i64(psw_addr, dest);
1233            tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1234
1235            ret = EXIT_GOTO_TB;
1236        } else {
1237            /* Fallthru can use goto_tb, but taken branch cannot.  */
1238            /* Store taken branch destination before the brcond.  This
1239               avoids having to allocate a new local temp to hold it.
1240               We'll overwrite this in the not taken case anyway.  */
1241            if (!is_imm) {
1242                tcg_gen_mov_i64(psw_addr, cdest);
1243            }
1244
1245            lab = gen_new_label();
1246            if (c->is_64) {
1247                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1248            } else {
1249                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1250            }
1251
1252            /* Branch not taken.  */
1253            update_cc_op(s);
1254            tcg_gen_goto_tb(0);
1255            tcg_gen_movi_i64(psw_addr, s->next_pc);
1256            tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1257
1258            gen_set_label(lab);
1259            if (is_imm) {
1260                tcg_gen_movi_i64(psw_addr, dest);
1261            }
1262            per_breaking_event(s);
1263            ret = EXIT_PC_UPDATED;
1264        }
1265    } else {
1266        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
1267           Most commonly we're single-stepping or some other condition that
1268           disables all use of goto_tb.  Just update the PC and exit.  */
1269
1270        TCGv_i64 next = tcg_const_i64(s->next_pc);
1271        if (is_imm) {
1272            cdest = tcg_const_i64(dest);
1273        }
1274
1275        if (c->is_64) {
1276            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1277                                cdest, next);
1278            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1279        } else {
1280            TCGv_i32 t0 = tcg_temp_new_i32();
1281            TCGv_i64 t1 = tcg_temp_new_i64();
1282            TCGv_i64 z = tcg_const_i64(0);
1283            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1284            tcg_gen_extu_i32_i64(t1, t0);
1285            tcg_temp_free_i32(t0);
1286            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1287            per_branch_cond(s, TCG_COND_NE, t1, z);
1288            tcg_temp_free_i64(t1);
1289            tcg_temp_free_i64(z);
1290        }
1291
1292        if (is_imm) {
1293            tcg_temp_free_i64(cdest);
1294        }
1295        tcg_temp_free_i64(next);
1296
1297        ret = EXIT_PC_UPDATED;
1298    }
1299
1300 egress:
1301    free_compare(c);
1302    return ret;
1303}
1304
1305/* ====================================================================== */
1306/* The operations.  These perform the bulk of the work for any insn,
1307   usually after the operands have been loaded and output initialized.  */
1308
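     /* Absolute value without a branch: movcond selects between in2 and
        its negation according to the sign.  */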
1309static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1310{
1311    TCGv_i64 z, n;
1312    z = tcg_const_i64(0);
1313    n = tcg_temp_new_i64();
1314    tcg_gen_neg_i64(n, o->in2);
1315    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1316    tcg_temp_free_i64(n);
1317    tcg_temp_free_i64(z);
1318    return NO_EXIT;
1319}
1320
1321static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1322{
1323    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1324    return NO_EXIT;
1325}
1326
1327static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1328{
1329    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1330    return NO_EXIT;
1331}
1332
1333static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1334{
1335    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1336    tcg_gen_mov_i64(o->out2, o->in2);
1337    return NO_EXIT;
1338}
1339
1340static ExitStatus op_add(DisasContext *s, DisasOps *o)
1341{
1342    tcg_gen_add_i64(o->out, o->in1, o->in2);
1343    return NO_EXIT;
1344}
1345
1346static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1347{
1348    DisasCompare cmp;
1349    TCGv_i64 carry;
1350
1351    tcg_gen_add_i64(o->out, o->in1, o->in2);
1352
1353    /* The carry flag is the msb of CC, therefore the branch mask that would
1354       create that comparison is 3.  Feeding the generated comparison to
1355       setcond produces the carry flag that we desire.  */
1356    disas_jcc(s, &cmp, 3);
1357    carry = tcg_temp_new_i64();
1358    if (cmp.is_64) {
1359        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1360    } else {
1361        TCGv_i32 t = tcg_temp_new_i32();
1362        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1363        tcg_gen_extu_i32_i64(carry, t);
1364        tcg_temp_free_i32(t);
1365    }
1366    free_compare(&cmp);
1367
1368    tcg_gen_add_i64(o->out, o->out, carry);
1369    tcg_temp_free_i64(carry);
1370    return NO_EXIT;
1371}
1372
1373static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1374{
1375    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1376    return NO_EXIT;
1377}
1378
1379static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1380{
1381    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1382    return NO_EXIT;
1383}
1384
1385static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1386{
1387    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1388    return_low128(o->out2);
1389    return NO_EXIT;
1390}
1391
1392static ExitStatus op_and(DisasContext *s, DisasOps *o)
1393{
1394    tcg_gen_and_i64(o->out, o->in1, o->in2);
1395    return NO_EXIT;
1396}
1397
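     /* AND an immediate into one slice of the register: insn->data encodes
        the slice's shift and width, and OR-ing ~mask into the shifted
        immediate preserves the bits outside the slice.  */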
1398static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1399{
1400    int shift = s->insn->data & 0xff;
1401    int size = s->insn->data >> 8;
1402    uint64_t mask = ((1ull << size) - 1) << shift;
1403
1404    assert(!o->g_in2);
1405    tcg_gen_shli_i64(o->in2, o->in2, shift);
1406    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1407    tcg_gen_and_i64(o->out, o->in1, o->in2);
1408
1409    /* Produce the CC from only the bits manipulated.  */
1410    tcg_gen_andi_i64(cc_dst, o->out, mask);
1411    set_cc_nz_u64(s, cc_dst);
1412    return NO_EXIT;
1413}
1414
1415static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1416{
1417    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1418    if (!TCGV_IS_UNUSED_I64(o->in2)) {
1419        tcg_gen_mov_i64(psw_addr, o->in2);
1420        per_branch(s, false);
1421        return EXIT_PC_UPDATED;
1422    } else {
1423        return NO_EXIT;
1424    }
1425}
1426
1427static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1428{
1429    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1430    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1431}
1432
1433static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1434{
1435    int m1 = get_field(s->fields, m1);
1436    bool is_imm = have_field(s->fields, i2);
1437    int imm = is_imm ? get_field(s->fields, i2) : 0;
1438    DisasCompare c;
1439
1440    /* BCR with R2 = 0 causes no branching */
1441    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
1442        if (m1 == 14) {
1443            /* Perform serialization */
1444            /* FIXME: check for fast-BCR-serialization facility */
1445            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1446        }
1447        if (m1 == 15) {
1448            /* Perform serialization */
1449            /* FIXME: perform checkpoint-synchronisation */
1450            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1451        }
1452        return NO_EXIT;
1453    }
1454
1455    disas_jcc(s, &c, m1);
1456    return help_branch(s, &c, is_imm, imm, o->in2);
1457}
1458
1459static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1460{
1461    int r1 = get_field(s->fields, r1);
1462    bool is_imm = have_field(s->fields, i2);
1463    int imm = is_imm ? get_field(s->fields, i2) : 0;
1464    DisasCompare c;
1465    TCGv_i64 t;
1466
1467    c.cond = TCG_COND_NE;
1468    c.is_64 = false;
1469    c.g1 = false;
1470    c.g2 = false;
1471
1472    t = tcg_temp_new_i64();
1473    tcg_gen_subi_i64(t, regs[r1], 1);
1474    store_reg32_i64(r1, t);
1475    c.u.s32.a = tcg_temp_new_i32();
1476    c.u.s32.b = tcg_const_i32(0);
1477    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1478    tcg_temp_free_i64(t);
1479
1480    return help_branch(s, &c, is_imm, imm, o->in2);
1481}
1482
1483static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
1484{
1485    int r1 = get_field(s->fields, r1);
1486    int imm = get_field(s->fields, i2);
1487    DisasCompare c;
1488    TCGv_i64 t;
1489
1490    c.cond = TCG_COND_NE;
1491    c.is_64 = false;
1492    c.g1 = false;
1493    c.g2 = false;
1494
1495    t = tcg_temp_new_i64();
1496    tcg_gen_shri_i64(t, regs[r1], 32);
1497    tcg_gen_subi_i64(t, t, 1);
1498    store_reg32h_i64(r1, t);
1499    c.u.s32.a = tcg_temp_new_i32();
1500    c.u.s32.b = tcg_const_i32(0);
1501    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1502    tcg_temp_free_i64(t);
1503
1504    return help_branch(s, &c, 1, imm, o->in2);
1505}
1506
1507static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1508{
1509    int r1 = get_field(s->fields, r1);
1510    bool is_imm = have_field(s->fields, i2);
1511    int imm = is_imm ? get_field(s->fields, i2) : 0;
1512    DisasCompare c;
1513
1514    c.cond = TCG_COND_NE;
1515    c.is_64 = true;
1516    c.g1 = true;
1517    c.g2 = false;
1518
1519    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1520    c.u.s64.a = regs[r1];
1521    c.u.s64.b = tcg_const_i64(0);
1522
1523    return help_branch(s, &c, is_imm, imm, o->in2);
1524}
1525
1526static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1527{
1528    int r1 = get_field(s->fields, r1);
1529    int r3 = get_field(s->fields, r3);
1530    bool is_imm = have_field(s->fields, i2);
1531    int imm = is_imm ? get_field(s->fields, i2) : 0;
1532    DisasCompare c;
1533    TCGv_i64 t;
1534
1535    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1536    c.is_64 = false;
1537    c.g1 = false;
1538    c.g2 = false;
1539
1540    t = tcg_temp_new_i64();
1541    tcg_gen_add_i64(t, regs[r1], regs[r3]);
1542    c.u.s32.a = tcg_temp_new_i32();
1543    c.u.s32.b = tcg_temp_new_i32();
1544    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1545    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1546    store_reg32_i64(r1, t);
1547    tcg_temp_free_i64(t);
1548
1549    return help_branch(s, &c, is_imm, imm, o->in2);
1550}
1551
1552static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1553{
1554    int r1 = get_field(s->fields, r1);
1555    int r3 = get_field(s->fields, r3);
1556    bool is_imm = have_field(s->fields, i2);
1557    int imm = is_imm ? get_field(s->fields, i2) : 0;
1558    DisasCompare c;
1559
1560    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1561    c.is_64 = true;
1562
1563    if (r1 == (r3 | 1)) {
1564        c.u.s64.b = load_reg(r3 | 1);
1565        c.g2 = false;
1566    } else {
1567        c.u.s64.b = regs[r3 | 1];
1568        c.g2 = true;
1569    }
1570
1571    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1572    c.u.s64.a = regs[r1];
1573    c.g1 = true;
1574
1575    return help_branch(s, &c, is_imm, imm, o->in2);
1576}
1577
1578static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1579{
1580    int imm, m3 = get_field(s->fields, m3);
1581    bool is_imm;
1582    DisasCompare c;
1583
1584    c.cond = ltgt_cond[m3];
1585    if (s->insn->data) {
1586        c.cond = tcg_unsigned_cond(c.cond);
1587    }
1588    c.is_64 = c.g1 = c.g2 = true;
1589    c.u.s64.a = o->in1;
1590    c.u.s64.b = o->in2;
1591
1592    is_imm = have_field(s->fields, i4);
1593    if (is_imm) {
1594        imm = get_field(s->fields, i4);
1595    } else {
1596        imm = 0;
1597        o->out = get_address(s, 0, get_field(s->fields, b4),
1598                             get_field(s->fields, d4));
1599    }
1600
1601    return help_branch(s, &c, is_imm, imm, o->out);
1602}
1603
1604static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1605{
1606    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1607    set_cc_static(s);
1608    return NO_EXIT;
1609}
1610
1611static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1612{
1613    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1614    set_cc_static(s);
1615    return NO_EXIT;
1616}
1617
1618static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1619{
1620    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1621    set_cc_static(s);
1622    return NO_EXIT;
1623}
1624
1625static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1626{
1627    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1628    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1629    tcg_temp_free_i32(m3);
1630    gen_set_cc_nz_f32(s, o->in2);
1631    return NO_EXIT;
1632}
1633
1634static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1635{
1636    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1637    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1638    tcg_temp_free_i32(m3);
1639    gen_set_cc_nz_f64(s, o->in2);
1640    return NO_EXIT;
1641}
1642
1643static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1644{
1645    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1646    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1647    tcg_temp_free_i32(m3);
1648    gen_set_cc_nz_f128(s, o->in1, o->in2);
1649    return NO_EXIT;
1650}
1651
1652static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1653{
1654    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1655    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1656    tcg_temp_free_i32(m3);
1657    gen_set_cc_nz_f32(s, o->in2);
1658    return NO_EXIT;
1659}
1660
1661static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1662{
1663    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1664    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1665    tcg_temp_free_i32(m3);
1666    gen_set_cc_nz_f64(s, o->in2);
1667    return NO_EXIT;
1668}
1669
1670static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1671{
1672    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1673    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1674    tcg_temp_free_i32(m3);
1675    gen_set_cc_nz_f128(s, o->in1, o->in2);
1676    return NO_EXIT;
1677}
1678
1679static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1680{
1681    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1682    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1683    tcg_temp_free_i32(m3);
1684    gen_set_cc_nz_f32(s, o->in2);
1685    return NO_EXIT;
1686}
1687
1688static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1689{
1690    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1691    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1692    tcg_temp_free_i32(m3);
1693    gen_set_cc_nz_f64(s, o->in2);
1694    return NO_EXIT;
1695}
1696
1697static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1698{
1699    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1700    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1701    tcg_temp_free_i32(m3);
1702    gen_set_cc_nz_f128(s, o->in1, o->in2);
1703    return NO_EXIT;
1704}
1705
1706static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1707{
1708    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1709    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1710    tcg_temp_free_i32(m3);
1711    gen_set_cc_nz_f32(s, o->in2);
1712    return NO_EXIT;
1713}
1714
1715static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1716{
1717    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1718    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1719    tcg_temp_free_i32(m3);
1720    gen_set_cc_nz_f64(s, o->in2);
1721    return NO_EXIT;
1722}
1723
1724static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1725{
1726    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1727    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1728    tcg_temp_free_i32(m3);
1729    gen_set_cc_nz_f128(s, o->in1, o->in2);
1730    return NO_EXIT;
1731}
1732
1733static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1734{
1735    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1736    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1737    tcg_temp_free_i32(m3);
1738    return NO_EXIT;
1739}
1740
1741static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1742{
1743    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1744    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1745    tcg_temp_free_i32(m3);
1746    return NO_EXIT;
1747}
1748
1749static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1750{
1751    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1752    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1753    tcg_temp_free_i32(m3);
1754    return_low128(o->out2);
1755    return NO_EXIT;
1756}
1757
1758static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1759{
1760    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1761    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1762    tcg_temp_free_i32(m3);
1763    return NO_EXIT;
1764}
1765
1766static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1767{
1768    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1769    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1770    tcg_temp_free_i32(m3);
1771    return NO_EXIT;
1772}
1773
1774static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1775{
1776    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1777    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1778    tcg_temp_free_i32(m3);
1779    return_low128(o->out2);
1780    return NO_EXIT;
1781}
1782
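     /* CHECKSUM: the length processed by the helper (len) is used to advance
        the operand address in R2 and is subtracted from the remaining length
        in R2 + 1.  */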
1783static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1784{
1785    int r2 = get_field(s->fields, r2);
1786    TCGv_i64 len = tcg_temp_new_i64();
1787
1788    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1789    set_cc_static(s);
1790    return_low128(o->out);
1791
1792    tcg_gen_add_i64(regs[r2], regs[r2], len);
1793    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1794    tcg_temp_free_i64(len);
1795
1796    return NO_EXIT;
1797}
1798
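     /* COMPARE LOGICAL (storage-storage): operand lengths of 1, 2, 4 or 8
        bytes are compared inline via the LTUGTU_64 cc op; any other length
        goes through the clc helper.  */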
1799static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1800{
1801    int l = get_field(s->fields, l1);
1802    TCGv_i32 vl;
1803
1804    switch (l + 1) {
1805    case 1:
1806        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1807        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1808        break;
1809    case 2:
1810        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1811        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1812        break;
1813    case 4:
1814        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1815        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1816        break;
1817    case 8:
1818        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1819        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1820        break;
1821    default:
1822        vl = tcg_const_i32(l);
1823        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1824        tcg_temp_free_i32(vl);
1825        set_cc_static(s);
1826        return NO_EXIT;
1827    }
1828    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1829    return NO_EXIT;
1830}
1831
1832static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
1833{
1834    int r1 = get_field(s->fields, r1);
1835    int r2 = get_field(s->fields, r2);
1836    TCGv_i32 t1, t2;
1837
1838    /* r1 and r2 must be even.  */
1839    if (r1 & 1 || r2 & 1) {
1840        gen_program_exception(s, PGM_SPECIFICATION);
1841        return EXIT_NORETURN;
1842    }
1843
1844    t1 = tcg_const_i32(r1);
1845    t2 = tcg_const_i32(r2);
1846    gen_helper_clcl(cc_op, cpu_env, t1, t2);
1847    tcg_temp_free_i32(t1);
1848    tcg_temp_free_i32(t2);
1849    set_cc_static(s);
1850    return NO_EXIT;
1851}
1852
1853static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1854{
1855    int r1 = get_field(s->fields, r1);
1856    int r3 = get_field(s->fields, r3);
1857    TCGv_i32 t1, t3;
1858
1859    /* r1 and r3 must be even.  */
1860    if (r1 & 1 || r3 & 1) {
1861        gen_program_exception(s, PGM_SPECIFICATION);
1862        return EXIT_NORETURN;
1863    }
1864
1865    t1 = tcg_const_i32(r1);
1866    t3 = tcg_const_i32(r3);
1867    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
1868    tcg_temp_free_i32(t1);
1869    tcg_temp_free_i32(t3);
1870    set_cc_static(s);
1871    return NO_EXIT;
1872}
1873
1874static ExitStatus op_clclu(DisasContext *s, DisasOps *o)
1875{
1876    int r1 = get_field(s->fields, r1);
1877    int r3 = get_field(s->fields, r3);
1878    TCGv_i32 t1, t3;
1879
1880    /* r1 and r3 must be even.  */
1881    if (r1 & 1 || r3 & 1) {
1882        gen_program_exception(s, PGM_SPECIFICATION);
1883        return EXIT_NORETURN;
1884    }
1885
1886    t1 = tcg_const_i32(r1);
1887    t3 = tcg_const_i32(r3);
1888    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
1889    tcg_temp_free_i32(t1);
1890    tcg_temp_free_i32(t3);
1891    set_cc_static(s);
1892    return NO_EXIT;
1893}
1894
1895static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1896{
1897    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1898    TCGv_i32 t1 = tcg_temp_new_i32();
1899    tcg_gen_extrl_i64_i32(t1, o->in1);
1900    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1901    set_cc_static(s);
1902    tcg_temp_free_i32(t1);
1903    tcg_temp_free_i32(m3);
1904    return NO_EXIT;
1905}
1906
1907static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1908{
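        /* regs[0] supplies the string-terminating character to the helper.  */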
1909    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1910    set_cc_static(s);
1911    return_low128(o->in2);
1912    return NO_EXIT;
1913}
1914
1915static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1916{
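        /* COPY SIGN: combine the sign bit of IN1 with the 63 magnitude
           bits of IN2.  */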
1917    TCGv_i64 t = tcg_temp_new_i64();
1918    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1919    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1920    tcg_gen_or_i64(o->out, o->out, t);
1921    tcg_temp_free_i64(t);
1922    return NO_EXIT;
1923}
1924
1925static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1926{
1927    int d2 = get_field(s->fields, d2);
1928    int b2 = get_field(s->fields, b2);
1929    TCGv_i64 addr, cc;
1930
1931    /* Note that in1 = R3 (new value) and
1932       in2 = (zero-extended) R1 (expected value).  */
1933
1934    addr = get_address(s, 0, b2, d2);
1935    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
1936                               get_mem_index(s), s->insn->data | MO_ALIGN);
1937    tcg_temp_free_i64(addr);
1938
1939    /* Are the memory and expected values (un)equal?  Note that this setcond
1940       produces the output CC value, thus the NE sense of the test.  */
1941    cc = tcg_temp_new_i64();
1942    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1943    tcg_gen_extrl_i64_i32(cc_op, cc);
1944    tcg_temp_free_i64(cc);
1945    set_cc_static(s);
1946
1947    return NO_EXIT;
1948}
1949
1950static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1951{
1952    int r1 = get_field(s->fields, r1);
1953    int r3 = get_field(s->fields, r3);
1954    int d2 = get_field(s->fields, d2);
1955    int b2 = get_field(s->fields, b2);
1956    TCGv_i64 addr;
1957    TCGv_i32 t_r1, t_r3;
1958
1959    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
1960    addr = get_address(s, 0, b2, d2);
1961    t_r1 = tcg_const_i32(r1);
1962    t_r3 = tcg_const_i32(r3);
1963    if (tb_cflags(s->tb) & CF_PARALLEL) {
1964        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
1965    } else {
1966        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
1967    }
1968    tcg_temp_free_i64(addr);
1969    tcg_temp_free_i32(t_r1);
1970    tcg_temp_free_i32(t_r3);
1971
1972    set_cc_static(s);
1973    return NO_EXIT;
1974}
1975
1976static ExitStatus op_csst(DisasContext *s, DisasOps *o)
1977{
1978    int r3 = get_field(s->fields, r3);
1979    TCGv_i32 t_r3 = tcg_const_i32(r3);
1980
1981    if (tb_cflags(s->tb) & CF_PARALLEL) {
1982        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->in1, o->in2);
1983    } else {
1984        gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
1985    }
1986    tcg_temp_free_i32(t_r3);
1987
1988    set_cc_static(s);
1989    return NO_EXIT;
1990}
1991
1992#ifndef CONFIG_USER_ONLY
1993static ExitStatus op_csp(DisasContext *s, DisasOps *o)
1994{
1995    TCGMemOp mop = s->insn->data;
1996    TCGv_i64 addr, old, cc;
1997    TCGLabel *lab = gen_new_label();
1998
1999    /* Note that in1 = R1 (zero-extended expected value),
2000       out = R1 (original reg), out2 = R1+1 (new value).  */
2001
2002    check_privileged(s);
2003    addr = tcg_temp_new_i64();
2004    old = tcg_temp_new_i64();
2005    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2006    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2007                               get_mem_index(s), mop | MO_ALIGN);
2008    tcg_temp_free_i64(addr);
2009
2010    /* Are the memory and expected values (un)equal?  */
2011    cc = tcg_temp_new_i64();
2012    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2013    tcg_gen_extrl_i64_i32(cc_op, cc);
2014
2015    /* Write back the output now, before the following branch, so that
2016       we don't need local temps.  */
2017    if ((mop & MO_SIZE) == MO_32) {
2018        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2019    } else {
2020        tcg_gen_mov_i64(o->out, old);
2021    }
2022    tcg_temp_free_i64(old);
2023
2024    /* If the comparison was equal, and the LSB of R2 was set,
2025       then we need to flush the TLB (for all cpus).  */
2026    tcg_gen_xori_i64(cc, cc, 1);
2027    tcg_gen_and_i64(cc, cc, o->in2);
2028    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2029    tcg_temp_free_i64(cc);
2030
2031    gen_helper_purge(cpu_env);
2032    gen_set_label(lab);
2033
2034    return NO_EXIT;
2035}
2036#endif
2037
2038static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2039{
2040    TCGv_i64 t1 = tcg_temp_new_i64();
2041    TCGv_i32 t2 = tcg_temp_new_i32();
2042    tcg_gen_extrl_i64_i32(t2, o->in1);
2043    gen_helper_cvd(t1, t2);
2044    tcg_temp_free_i32(t2);
2045    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2046    tcg_temp_free_i64(t1);
2047    return NO_EXIT;
2048}
2049
2050static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2051{
2052    int m3 = get_field(s->fields, m3);
2053    TCGLabel *lab = gen_new_label();
2054    TCGCond c;
2055
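        /* Invert the comparison so that we branch around the trap when the
           condition from M3 is not satisfied; insn->data selects the
           logical (unsigned) variant of the compare.  */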
2056    c = tcg_invert_cond(ltgt_cond[m3]);
2057    if (s->insn->data) {
2058        c = tcg_unsigned_cond(c);
2059    }
2060    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2061
2062    /* Trap.  */
2063    gen_trap(s);
2064
2065    gen_set_label(lab);
2066    return NO_EXIT;
2067}
2068
2069static ExitStatus op_cuXX(DisasContext *s, DisasOps *o)
2070{
2071    int m3 = get_field(s->fields, m3);
2072    int r1 = get_field(s->fields, r1);
2073    int r2 = get_field(s->fields, r2);
2074    TCGv_i32 tr1, tr2, chk;
2075
2076    /* R1 and R2 must both be even.  */
2077    if ((r1 | r2) & 1) {
2078        gen_program_exception(s, PGM_SPECIFICATION);
2079        return EXIT_NORETURN;
2080    }
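    /* The M3 well-formedness-check flag is only honoured when the
       ETF3-enhancement facility is installed; otherwise force it to zero.  */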
2081    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2082        m3 = 0;
2083    }
2084
2085    tr1 = tcg_const_i32(r1);
2086    tr2 = tcg_const_i32(r2);
2087    chk = tcg_const_i32(m3);
2088
2089    switch (s->insn->data) {
2090    case 12:
2091        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2092        break;
2093    case 14:
2094        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2095        break;
2096    case 21:
2097        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2098        break;
2099    case 24:
2100        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2101        break;
2102    case 41:
2103        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2104        break;
2105    case 42:
2106        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2107        break;
2108    default:
2109        g_assert_not_reached();
2110    }
2111
2112    tcg_temp_free_i32(tr1);
2113    tcg_temp_free_i32(tr2);
2114    tcg_temp_free_i32(chk);
2115    set_cc_static(s);
2116    return NO_EXIT;
2117}
2118
2119#ifndef CONFIG_USER_ONLY
2120static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2121{
2122    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2123    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2124    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2125
2126    check_privileged(s);
2127    update_psw_addr(s);
2128    gen_op_calc_cc(s);
2129
2130    gen_helper_diag(cpu_env, r1, r3, func_code);
2131
2132    tcg_temp_free_i32(func_code);
2133    tcg_temp_free_i32(r3);
2134    tcg_temp_free_i32(r1);
2135    return NO_EXIT;
2136}
2137#endif
2138
2139static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2140{
2141    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2142    return_low128(o->out);
2143    return NO_EXIT;
2144}
2145
2146static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2147{
2148    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2149    return_low128(o->out);
2150    return NO_EXIT;
2151}
2152
2153static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2154{
2155    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2156    return_low128(o->out);
2157    return NO_EXIT;
2158}
2159
2160static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2161{
2162    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2163    return_low128(o->out);
2164    return NO_EXIT;
2165}
2166
2167static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2168{
2169    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2170    return NO_EXIT;
2171}
2172
2173static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2174{
2175    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2176    return NO_EXIT;
2177}
2178
2179static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2180{
2181    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2182    return_low128(o->out2);
2183    return NO_EXIT;
2184}
2185
2186static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2187{
2188    int r2 = get_field(s->fields, r2);
2189    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2190    return NO_EXIT;
2191}
2192
2193static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
2194{
2195    /* No cache information provided.  */
2196    tcg_gen_movi_i64(o->out, -1);
2197    return NO_EXIT;
2198}
2199
2200static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2201{
2202    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2203    return NO_EXIT;
2204}
2205
2206static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
2207{
2208    int r1 = get_field(s->fields, r1);
2209    int r2 = get_field(s->fields, r2);
2210    TCGv_i64 t = tcg_temp_new_i64();
2211
2212    /* Note the "subsequently" in the PoO, which implies a defined result
2213       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2214    tcg_gen_shri_i64(t, psw_mask, 32);
2215    store_reg32_i64(r1, t);
2216    if (r2 != 0) {
2217        store_reg32_i64(r2, psw_mask);
2218    }
2219
2220    tcg_temp_free_i64(t);
2221    return NO_EXIT;
2222}
2223
2224static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2225{
2226    int r1 = get_field(s->fields, r1);
2227    TCGv_i32 ilen;
2228    TCGv_i64 v1;
2229
2230    /* Nested EXECUTE is not allowed.  */
2231    if (unlikely(s->ex_value)) {
2232        gen_program_exception(s, PGM_EXECUTE);
2233        return EXIT_NORETURN;
2234    }
2235
2236    update_psw_addr(s);
2237    update_cc_op(s);
2238
2239    if (r1 == 0) {
2240        v1 = tcg_const_i64(0);
2241    } else {
2242        v1 = regs[r1];
2243    }
2244
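        /* The helper fetches the target instruction, ORs bits 56-63 of R1
           into its second byte (a no-op when R1 is 0), and records the
           result in env->ex_value; ending the TB here lets the translator
           pick that value up and execute the target next.  */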
2245    ilen = tcg_const_i32(s->ilen);
2246    gen_helper_ex(cpu_env, ilen, v1, o->in2);
2247    tcg_temp_free_i32(ilen);
2248
2249    if (r1 == 0) {
2250        tcg_temp_free_i64(v1);
2251    }
2252
2253    return EXIT_PC_CC_UPDATED;
2254}
2255
2256static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
2257{
2258    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2259    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2260    tcg_temp_free_i32(m3);
2261    return NO_EXIT;
2262}
2263
2264static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
2265{
2266    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2267    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2268    tcg_temp_free_i32(m3);
2269    return NO_EXIT;
2270}
2271
2272static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
2273{
2274    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2275    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2276    return_low128(o->out2);
2277    tcg_temp_free_i32(m3);
2278    return NO_EXIT;
2279}
2280
2281static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2282{
2283    /* We'll use the original input for cc computation, since we get to
2284       compare that against 0, which ought to be better than comparing
2285       the real output against 64.  It also lets cc_dst be a convenient
2286       temporary during our computation.  */
2287    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2288
2289    /* R1 = IN ? CLZ(IN) : 64.  */
2290    tcg_gen_clzi_i64(o->out, o->in2, 64);
2291
2292    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2293       value by 64, which is undefined.  But since the shift is 64 iff the
2294       input is zero, we still get the correct result after and'ing.  */
2295    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2296    tcg_gen_shr_i64(o->out2, o->out2, o->out);
2297    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2298    return NO_EXIT;
2299}
2300
2301static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2302{
2303    int m3 = get_field(s->fields, m3);
2304    int pos, len, base = s->insn->data;
2305    TCGv_i64 tmp = tcg_temp_new_i64();
2306    uint64_t ccm;
2307
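        /* CCM accumulates the mask of result bits actually replaced; it is
           fed to the CC_OP_ICM computation at the end.  */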
2308    switch (m3) {
2309    case 0xf:
2310        /* Effectively a 32-bit load.  */
2311        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2312        len = 32;
2313        goto one_insert;
2314
2315    case 0xc:
2316    case 0x6:
2317    case 0x3:
2318        /* Effectively a 16-bit load.  */
2319        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2320        len = 16;
2321        goto one_insert;
2322
2323    case 0x8:
2324    case 0x4:
2325    case 0x2:
2326    case 0x1:
2327        /* Effectively an 8-bit load.  */
2328        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2329        len = 8;
2330        goto one_insert;
2331
2332    one_insert:
2333        pos = base + ctz32(m3) * 8;
2334        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2335        ccm = ((1ull << len) - 1) << pos;
2336        break;
2337
2338    default:
2339        /* This is going to be a sequence of loads and inserts.  */
2340        pos = base + 32 - 8;
2341        ccm = 0;
2342        while (m3) {
2343            if (m3 & 0x8) {
2344                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2345                tcg_gen_addi_i64(o->in2, o->in2, 1);
2346                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2347                ccm |= 0xffull << pos;
2348            }
2349            m3 = (m3 << 1) & 0xf;
2350            pos -= 8;
2351        }
2352        break;
2353    }
2354
2355    tcg_gen_movi_i64(tmp, ccm);
2356    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2357    tcg_temp_free_i64(tmp);
2358    return NO_EXIT;
2359}
2360
2361static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2362{
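        /* insn->data packs the deposit position in its low byte and the
           field width in the byte above it.  */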
2363    int shift = s->insn->data & 0xff;
2364    int size = s->insn->data >> 8;
2365    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2366    return NO_EXIT;
2367}
2368
2369static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2370{
2371    TCGv_i64 t1;
2372
2373    gen_op_calc_cc(s);
2374    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2375
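        /* The 4-bit program mask from the PSW lands in bits 24-27 of the
           result and the CC in bits 28-29, i.e. the byte cleared above.  */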
2376    t1 = tcg_temp_new_i64();
2377    tcg_gen_shli_i64(t1, psw_mask, 20);
2378    tcg_gen_shri_i64(t1, t1, 36);
2379    tcg_gen_or_i64(o->out, o->out, t1);
2380
2381    tcg_gen_extu_i32_i64(t1, cc_op);
2382    tcg_gen_shli_i64(t1, t1, 28);
2383    tcg_gen_or_i64(o->out, o->out, t1);
2384    tcg_temp_free_i64(t1);
2385    return NO_EXIT;
2386}
2387
2388#ifndef CONFIG_USER_ONLY
2389static ExitStatus op_idte(DisasContext *s, DisasOps *o)
2390{
2391    TCGv_i32 m4;
2392
2393    check_privileged(s);
2394    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2395        m4 = tcg_const_i32(get_field(s->fields, m4));
2396    } else {
2397        m4 = tcg_const_i32(0);
2398    }
2399    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2400    tcg_temp_free_i32(m4);
2401    return NO_EXIT;
2402}
2403
2404static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2405{
2406    TCGv_i32 m4;
2407
2408    check_privileged(s);
2409    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2410        m4 = tcg_const_i32(get_field(s->fields, m4));
2411    } else {
2412        m4 = tcg_const_i32(0);
2413    }
2414    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2415    tcg_temp_free_i32(m4);
2416    return NO_EXIT;
2417}
2418
2419static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2420{
2421    check_privileged(s);
2422    gen_helper_iske(o->out, cpu_env, o->in2);
2423    return NO_EXIT;
2424}
2425#endif
2426
2427static ExitStatus op_msa(DisasContext *s, DisasOps *o)
2428{
2429    int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
2430    int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
2431    int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
2432    TCGv_i32 t_r1, t_r2, t_r3, type;
2433
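        /* Validate the register operands; the cases below fall through so
           that each function type performs every check that applies to it.  */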
2434    switch (s->insn->data) {
2435    case S390_FEAT_TYPE_KMCTR:
2436        if (r3 & 1 || !r3) {
2437            gen_program_exception(s, PGM_SPECIFICATION);
2438            return EXIT_NORETURN;
2439        }
2440        /* FALL THROUGH */
2441    case S390_FEAT_TYPE_PPNO:
2442    case S390_FEAT_TYPE_KMF:
2443    case S390_FEAT_TYPE_KMC:
2444    case S390_FEAT_TYPE_KMO:
2445    case S390_FEAT_TYPE_KM:
2446        if (r1 & 1 || !r1) {
2447            gen_program_exception(s, PGM_SPECIFICATION);
2448            return EXIT_NORETURN;
2449        }
2450        /* FALL THROUGH */
2451    case S390_FEAT_TYPE_KMAC:
2452    case S390_FEAT_TYPE_KIMD:
2453    case S390_FEAT_TYPE_KLMD:
2454        if (r2 & 1 || !r2) {
2455            gen_program_exception(s, PGM_SPECIFICATION);
2456            return EXIT_NORETURN;
2457        }
2458        /* FALL THROUGH */
2459    case S390_FEAT_TYPE_PCKMO:
2460    case S390_FEAT_TYPE_PCC:
2461        break;
2462    default:
2463        g_assert_not_reached();
2464    }
2465
2466    t_r1 = tcg_const_i32(r1);
2467    t_r2 = tcg_const_i32(r2);
2468    t_r3 = tcg_const_i32(r3);
2469    type = tcg_const_i32(s->insn->data);
2470    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2471    set_cc_static(s);
2472    tcg_temp_free_i32(t_r1);
2473    tcg_temp_free_i32(t_r2);
2474    tcg_temp_free_i32(t_r3);
2475    tcg_temp_free_i32(type);
2476    return NO_EXIT;
2477}
2478
2479static ExitStatus op_keb(DisasContext *s, DisasOps *o)
2480{
2481    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2482    set_cc_static(s);
2483    return NO_EXIT;
2484}
2485
2486static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
2487{
2488    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2489    set_cc_static(s);
2490    return NO_EXIT;
2491}
2492
2493static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
2494{
2495    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2496    set_cc_static(s);
2497    return NO_EXIT;
2498}
2499
2500static ExitStatus op_laa(DisasContext *s, DisasOps *o)
2501{
2502    /* The real output is indeed the original value in memory;
2503       recompute the addition for the computation of CC.  */
2504    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2505                                 s->insn->data | MO_ALIGN);
2506    /* However, we need to recompute the addition for setting CC.  */
2507    tcg_gen_add_i64(o->out, o->in1, o->in2);
2508    return NO_EXIT;
2509}
2510
2511static ExitStatus op_lan(DisasContext *s, DisasOps *o)
2512{
2513    /* The real output is indeed the original value in memory;
2514       recompute the operation for the computation of CC.  */
2515    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2516                                 s->insn->data | MO_ALIGN);
2517    /* However, we need to recompute the operation for setting CC.  */
2518    tcg_gen_and_i64(o->out, o->in1, o->in2);
2519    return NO_EXIT;
2520}
2521
2522static ExitStatus op_lao(DisasContext *s, DisasOps *o)
2523{
2524    /* The real output is indeed the original value in memory;
2525       recompute the operation for the computation of CC.  */
2526    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2527                                s->insn->data | MO_ALIGN);
2528    /* However, we need to recompute the operation for setting CC.  */
2529    tcg_gen_or_i64(o->out, o->in1, o->in2);
2530    return NO_EXIT;
2531}
2532
2533static ExitStatus op_lax(DisasContext *s, DisasOps *o)
2534{
2535    /* The real output is indeed the original value in memory;
2536       recompute the operation for the computation of CC.  */
2537    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2538                                 s->insn->data | MO_ALIGN);
2539    /* However, we need to recompute the operation for setting CC.  */
2540    tcg_gen_xor_i64(o->out, o->in1, o->in2);
2541    return NO_EXIT;
2542}
2543
2544static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2545{
2546    gen_helper_ldeb(o->out, cpu_env, o->in2);
2547    return NO_EXIT;
2548}
2549
2550static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2551{
2552    gen_helper_ledb(o->out, cpu_env, o->in2);
2553    return NO_EXIT;
2554}
2555
2556static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2557{
2558    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2559    return NO_EXIT;
2560}
2561
2562static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2563{
2564    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2565    return NO_EXIT;
2566}
2567
2568static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2569{
2570    gen_helper_lxdb(o->out, cpu_env, o->in2);
2571    return_low128(o->out2);
2572    return NO_EXIT;
2573}
2574
2575static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2576{
2577    gen_helper_lxeb(o->out, cpu_env, o->in2);
2578    return_low128(o->out2);
2579    return NO_EXIT;
2580}
2581
2582static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2583{
2584    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2585    return NO_EXIT;
2586}
2587
2588static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2589{
2590    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2591    return NO_EXIT;
2592}
2593
2594static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2595{
2596    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2597    return NO_EXIT;
2598}
2599
2600static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2601{
2602    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2603    return NO_EXIT;
2604}
2605
2606static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2607{
2608    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2609    return NO_EXIT;
2610}
2611
2612static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2613{
2614    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2615    return NO_EXIT;
2616}
2617
2618static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2619{
2620    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2621    return NO_EXIT;
2622}
2623
2624static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2625{
2626    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2627    return NO_EXIT;
2628}
2629
2630static ExitStatus op_lat(DisasContext *s, DisasOps *o)
2631{
2632    TCGLabel *lab = gen_new_label();
2633    store_reg32_i64(get_field(s->fields, r1), o->in2);
2634    /* The value is stored even in case of trap. */
2635    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2636    gen_trap(s);
2637    gen_set_label(lab);
2638    return NO_EXIT;
2639}
2640
2641static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2642{
2643    TCGLabel *lab = gen_new_label();
2644    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2645    /* The value is stored even in case of trap. */
2646    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2647    gen_trap(s);
2648    gen_set_label(lab);
2649    return NO_EXIT;
2650}
2651
2652static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2653{
2654    TCGLabel *lab = gen_new_label();
2655    store_reg32h_i64(get_field(s->fields, r1), o->in2);
2656    /* The value is stored even in case of trap. */
2657    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2658    gen_trap(s);
2659    gen_set_label(lab);
2660    return NO_EXIT;
2661}
2662
2663static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2664{
2665    TCGLabel *lab = gen_new_label();
2666    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2667    /* The value is stored even in case of trap. */
2668    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2669    gen_trap(s);
2670    gen_set_label(lab);
2671    return NO_EXIT;
2672}
2673
2674static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2675{
2676    TCGLabel *lab = gen_new_label();
2677    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2678    /* The value is stored even in case of trap. */
2679    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2680    gen_trap(s);
2681    gen_set_label(lab);
2682    return NO_EXIT;
2683}
2684
2685static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2686{
2687    DisasCompare c;
2688
2689    disas_jcc(s, &c, get_field(s->fields, m3));
2690
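        /* When the condition holds, the result is IN2 (the value to be
           loaded); otherwise IN1, i.e. the destination is left unchanged.  */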
2691    if (c.is_64) {
2692        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2693                            o->in2, o->in1);
2694        free_compare(&c);
2695    } else {
2696        TCGv_i32 t32 = tcg_temp_new_i32();
2697        TCGv_i64 t, z;
2698
2699        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2700        free_compare(&c);
2701
2702        t = tcg_temp_new_i64();
2703        tcg_gen_extu_i32_i64(t, t32);
2704        tcg_temp_free_i32(t32);
2705
2706        z = tcg_const_i64(0);
2707        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2708        tcg_temp_free_i64(t);
2709        tcg_temp_free_i64(z);
2710    }
2711
2712    return NO_EXIT;
2713}
2714
2715#ifndef CONFIG_USER_ONLY
2716static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2717{
2718    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2719    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2720    check_privileged(s);
2721    gen_helper_lctl(cpu_env, r1, o->in2, r3);
2722    tcg_temp_free_i32(r1);
2723    tcg_temp_free_i32(r3);
2724    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2725    return EXIT_PC_STALE_NOCHAIN;
2726}
2727
2728static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2729{
2730    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2731    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2732    check_privileged(s);
2733    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2734    tcg_temp_free_i32(r1);
2735    tcg_temp_free_i32(r3);
2736    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2737    return EXIT_PC_STALE_NOCHAIN;
2738}
2739
2740static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2741{
2742    check_privileged(s);
2743    gen_helper_lra(o->out, cpu_env, o->in2);
2744    set_cc_static(s);
2745    return NO_EXIT;
2746}
2747
2748static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
2749{
2750    check_privileged(s);
2751
2752    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2753    return NO_EXIT;
2754}
2755
2756static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2757{
2758    TCGv_i64 t1, t2;
2759
2760    check_privileged(s);
2761    per_breaking_event(s);
2762
2763    t1 = tcg_temp_new_i64();
2764    t2 = tcg_temp_new_i64();
2765    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2766    tcg_gen_addi_i64(o->in2, o->in2, 4);
2767    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2768    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
2769    tcg_gen_shli_i64(t1, t1, 32);
2770    gen_helper_load_psw(cpu_env, t1, t2);
2771    tcg_temp_free_i64(t1);
2772    tcg_temp_free_i64(t2);
2773    return EXIT_NORETURN;
2774}
2775
2776static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2777{
2778    TCGv_i64 t1, t2;
2779
2780    check_privileged(s);
2781    per_breaking_event(s);
2782
2783    t1 = tcg_temp_new_i64();
2784    t2 = tcg_temp_new_i64();
2785    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2786    tcg_gen_addi_i64(o->in2, o->in2, 8);
2787    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2788    gen_helper_load_psw(cpu_env, t1, t2);
2789    tcg_temp_free_i64(t1);
2790    tcg_temp_free_i64(t2);
2791    return EXIT_NORETURN;
2792}
2793#endif
2794
2795static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2796{
2797    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2798    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2799    gen_helper_lam(cpu_env, r1, o->in2, r3);
2800    tcg_temp_free_i32(r1);
2801    tcg_temp_free_i32(r3);
2802    return NO_EXIT;
2803}
2804
2805static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2806{
2807    int r1 = get_field(s->fields, r1);
2808    int r3 = get_field(s->fields, r3);
2809    TCGv_i64 t1, t2;
2810
2811    /* Only one register to read. */
2812    t1 = tcg_temp_new_i64();
2813    if (unlikely(r1 == r3)) {
2814        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2815        store_reg32_i64(r1, t1);
2816        tcg_temp_free(t1);
2817        return NO_EXIT;
2818    }
2819
2820    /* First load the values of the first and last registers to trigger
2821       possible page faults. */
2822    t2 = tcg_temp_new_i64();
2823    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2824    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2825    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2826    store_reg32_i64(r1, t1);
2827    store_reg32_i64(r3, t2);
2828
2829    /* Only two registers to read. */
2830    if (((r1 + 1) & 15) == r3) {
2831        tcg_temp_free(t2);
2832        tcg_temp_free(t1);
2833        return NO_EXIT;
2834    }
2835
2836    /* Then load the remaining registers. Page fault can't occur. */
2837    r3 = (r3 - 1) & 15;
2838    tcg_gen_movi_i64(t2, 4);
2839    while (r1 != r3) {
2840        r1 = (r1 + 1) & 15;
2841        tcg_gen_add_i64(o->in2, o->in2, t2);
2842        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2843        store_reg32_i64(r1, t1);
2844    }
2845    tcg_temp_free(t2);
2846    tcg_temp_free(t1);
2847
2848    return NO_EXIT;
2849}
2850
2851static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2852{
2853    int r1 = get_field(s->fields, r1);
2854    int r3 = get_field(s->fields, r3);
2855    TCGv_i64 t1, t2;
2856
2857    /* Only one register to read. */
2858    t1 = tcg_temp_new_i64();
2859    if (unlikely(r1 == r3)) {
2860        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2861        store_reg32h_i64(r1, t1);
2862        tcg_temp_free(t1);
2863        return NO_EXIT;
2864    }
2865
2866    /* First load the values of the first and last registers to trigger
2867       possible page faults. */
2868    t2 = tcg_temp_new_i64();
2869    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2870    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2871    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2872    store_reg32h_i64(r1, t1);
2873    store_reg32h_i64(r3, t2);
2874
2875    /* Only two registers to read. */
2876    if (((r1 + 1) & 15) == r3) {
2877        tcg_temp_free(t2);
2878        tcg_temp_free(t1);
2879        return NO_EXIT;
2880    }
2881
2882    /* Then load the remaining registers. Page fault can't occur. */
2883    r3 = (r3 - 1) & 15;
2884    tcg_gen_movi_i64(t2, 4);
2885    while (r1 != r3) {
2886        r1 = (r1 + 1) & 15;
2887        tcg_gen_add_i64(o->in2, o->in2, t2);
2888        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2889        store_reg32h_i64(r1, t1);
2890    }
2891    tcg_temp_free(t2);
2892    tcg_temp_free(t1);
2893
2894    return NO_EXIT;
2895}
2896
2897static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2898{
2899    int r1 = get_field(s->fields, r1);
2900    int r3 = get_field(s->fields, r3);
2901    TCGv_i64 t1, t2;
2902
2903    /* Only one register to read. */
2904    if (unlikely(r1 == r3)) {
2905        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2906        return NO_EXIT;
2907    }
2908
2909    /* First load the values of the first and last registers to trigger
2910       possible page faults. */
2911    t1 = tcg_temp_new_i64();
2912    t2 = tcg_temp_new_i64();
2913    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2914    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2915    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2916    tcg_gen_mov_i64(regs[r1], t1);
2917    tcg_temp_free(t2);
2918
2919    /* Only two registers to read. */
2920    if (((r1 + 1) & 15) == r3) {
2921        tcg_temp_free(t1);
2922        return NO_EXIT;
2923    }
2924
2925    /* Then load the remaining registers. Page fault can't occur. */
2926    r3 = (r3 - 1) & 15;
2927    tcg_gen_movi_i64(t1, 8);
2928    while (r1 != r3) {
2929        r1 = (r1 + 1) & 15;
2930        tcg_gen_add_i64(o->in2, o->in2, t1);
2931        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2932    }
2933    tcg_temp_free(t1);
2934
2935    return NO_EXIT;
2936}
2937
2938static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
2939{
2940    TCGv_i64 a1, a2;
2941    TCGMemOp mop = s->insn->data;
2942
2943    /* In a parallel context, stop the world and single step.  */
2944    if (tb_cflags(s->tb) & CF_PARALLEL) {
2945        potential_page_fault(s);
2946        gen_exception(EXCP_ATOMIC);
2947        return EXIT_NORETURN;
2948    }
2949
2950    /* In a serial context, perform the two loads ... */
2951    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
2952    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
2953    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
2954    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
2955    tcg_temp_free_i64(a1);
2956    tcg_temp_free_i64(a2);
2957
2958    /* ... and indicate that we performed them while interlocked.  */
2959    gen_op_movi_cc(s, 0);
2960    return NO_EXIT;
2961}
2962
2963static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
2964{
2965    if (tb_cflags(s->tb) & CF_PARALLEL) {
2966        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
2967    } else {
2968        gen_helper_lpq(o->out, cpu_env, o->in2);
2969    }
2970    return_low128(o->out2);
2971    return NO_EXIT;
2972}
2973
2974#ifndef CONFIG_USER_ONLY
2975static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2976{
2977    check_privileged(s);
2978    gen_helper_lura(o->out, cpu_env, o->in2);
2979    return NO_EXIT;
2980}
2981
2982static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2983{
2984    check_privileged(s);
2985    gen_helper_lurag(o->out, cpu_env, o->in2);
2986    return NO_EXIT;
2987}
2988#endif
2989
2990static ExitStatus op_lzrb(DisasContext *s, DisasOps *o)
2991{
2992    tcg_gen_andi_i64(o->out, o->in2, -256);
2993    return NO_EXIT;
2994}
2995
2996static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2997{
2998    o->out = o->in2;
2999    o->g_out = o->g_in2;
3000    TCGV_UNUSED_I64(o->in2);
3001    o->g_in2 = false;
3002    return NO_EXIT;
3003}
3004
3005static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
3006{
3007    int b2 = get_field(s->fields, b2);
3008    TCGv ar1 = tcg_temp_new_i64();
3009
3010    o->out = o->in2;
3011    o->g_out = o->g_in2;
3012    TCGV_UNUSED_I64(o->in2);
3013    o->g_in2 = false;
3014
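        /* Set up access register 1 according to the current address-space
           control in the PSW.  */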
3015    switch (s->tb->flags & FLAG_MASK_ASC) {
3016    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3017        tcg_gen_movi_i64(ar1, 0);
3018        break;
3019    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3020        tcg_gen_movi_i64(ar1, 1);
3021        break;
3022    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3023        if (b2) {
3024            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3025        } else {
3026            tcg_gen_movi_i64(ar1, 0);
3027        }
3028        break;
3029    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3030        tcg_gen_movi_i64(ar1, 2);
3031        break;
3032    }
3033
3034    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3035    tcg_temp_free_i64(ar1);
3036
3037    return NO_EXIT;
3038}
3039
3040static ExitStatus op_movx(DisasContext *s, DisasOps *o)
3041{
3042    o->out = o->in1;
3043    o->out2 = o->in2;
3044    o->g_out = o->g_in1;
3045    o->g_out2 = o->g_in2;
3046    TCGV_UNUSED_I64(o->in1);
3047    TCGV_UNUSED_I64(o->in2);
3048    o->g_in1 = o->g_in2 = false;
3049    return NO_EXIT;
3050}
3051
3052static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
3053{
3054    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3055    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3056    tcg_temp_free_i32(l);
3057    return NO_EXIT;
3058}
3059
3060static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
3061{
3062    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3063    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3064    tcg_temp_free_i32(l);
3065    return NO_EXIT;
3066}
3067
3068static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
3069{
3070    int r1 = get_field(s->fields, r1);
3071    int r2 = get_field(s->fields, r2);
3072    TCGv_i32 t1, t2;
3073
3074    /* r1 and r2 must be even.  */
3075    if (r1 & 1 || r2 & 1) {
3076        gen_program_exception(s, PGM_SPECIFICATION);
3077        return EXIT_NORETURN;
3078    }
3079
3080    t1 = tcg_const_i32(r1);
3081    t2 = tcg_const_i32(r2);
3082    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3083    tcg_temp_free_i32(t1);
3084    tcg_temp_free_i32(t2);
3085    set_cc_static(s);
3086    return NO_EXIT;
3087}
3088
3089static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
3090{
3091    int r1 = get_field(s->fields, r1);
3092    int r3 = get_field(s->fields, r3);
3093    TCGv_i32 t1, t3;
3094
3095    /* r1 and r3 must be even.  */
3096    if (r1 & 1 || r3 & 1) {
3097        gen_program_exception(s, PGM_SPECIFICATION);
3098        return EXIT_NORETURN;
3099    }
3100
3101    t1 = tcg_const_i32(r1);
3102    t3 = tcg_const_i32(r3);
3103    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3104    tcg_temp_free_i32(t1);
3105    tcg_temp_free_i32(t3);
3106    set_cc_static(s);
3107    return NO_EXIT;
3108}
3109
3110static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
3111{
3112    int r1 = get_field(s->fields, r1);
3113    int r3 = get_field(s->fields, r3);
3114    TCGv_i32 t1, t3;
3115
3116    /* r1 and r3 must be even.  */
3117    if (r1 & 1 || r3 & 1) {
3118        gen_program_exception(s, PGM_SPECIFICATION);
3119        return EXIT_NORETURN;
3120    }
3121
3122    t1 = tcg_const_i32(r1);
3123    t3 = tcg_const_i32(r3);
3124    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3125    tcg_temp_free_i32(t1);
3126    tcg_temp_free_i32(t3);
3127    set_cc_static(s);
3128    return NO_EXIT;
3129}
3130
3131static ExitStatus op_mvcos(DisasContext *s, DisasOps *o)
3132{
3133    int r3 = get_field(s->fields, r3);
3134    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3135    set_cc_static(s);
3136    return NO_EXIT;
3137}
3138
3139#ifndef CONFIG_USER_ONLY
3140static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
3141{
3142    int r1 = get_field(s->fields, l1);
3143    check_privileged(s);
3144    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3145    set_cc_static(s);
3146    return NO_EXIT;
3147}
3148
3149static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
3150{
3151    int r1 = get_field(s->fields, l1);
3152    check_privileged(s);
3153    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3154    set_cc_static(s);
3155    return NO_EXIT;
3156}
3157#endif
3158
3159static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
3160{
3161    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3162    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3163    tcg_temp_free_i32(l);
3164    return NO_EXIT;
3165}
3166
3167static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
3168{
3169    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3170    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3171    tcg_temp_free_i32(l);
3172    return NO_EXIT;
3173}
3174
3175static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
3176{
3177    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3178    set_cc_static(s);
3179    return NO_EXIT;
3180}
3181
3182static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
3183{
3184    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3185    set_cc_static(s);
3186    return_low128(o->in2);
3187    return NO_EXIT;
3188}
3189
3190static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
3191{
3192    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3193    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3194    tcg_temp_free_i32(l);
3195    return NO_EXIT;
3196}
3197
3198static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3199{
3200    tcg_gen_mul_i64(o->out, o->in1, o->in2);
3201    return NO_EXIT;
3202}
3203
3204static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3205{
3206    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3207    return NO_EXIT;
3208}
3209
3210static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
3211{
3212    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3213    return NO_EXIT;
3214}
3215
3216static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
3217{
3218    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3219    return NO_EXIT;
3220}
3221
3222static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
3223{
3224    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3225    return NO_EXIT;
3226}
3227
3228static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
3229{
3230    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3231    return_low128(o->out2);
3232    return NO_EXIT;
3233}
3234
3235static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
3236{
3237    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3238    return_low128(o->out2);
3239    return NO_EXIT;
3240}
3241
3242static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
3243{
3244    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3245    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3246    tcg_temp_free_i64(r3);
3247    return NO_EXIT;
3248}
3249
3250static ExitStatus op_madb(DisasContext *s, DisasOps *o)
3251{
3252    int r3 = get_field(s->fields, r3);
3253    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3254    return NO_EXIT;
3255}
3256
3257static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
3258{
3259    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3260    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3261    tcg_temp_free_i64(r3);
3262    return NO_EXIT;
3263}
3264
3265static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
3266{
3267    int r3 = get_field(s->fields, r3);
3268    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3269    return NO_EXIT;
3270}
3271
3272static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3273{
3274    TCGv_i64 z, n;
3275    z = tcg_const_i64(0);
3276    n = tcg_temp_new_i64();
3277    tcg_gen_neg_i64(n, o->in2);
3278    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3279    tcg_temp_free_i64(n);
3280    tcg_temp_free_i64(z);
3281    return NO_EXIT;
3282}
3283
3284static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
3285{
3286    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3287    return NO_EXIT;
3288}
3289
3290static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
3291{
3292    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3293    return NO_EXIT;
3294}
3295
3296static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
3297{
3298    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3299    tcg_gen_mov_i64(o->out2, o->in2);
3300    return NO_EXIT;
3301}
3302
3303static ExitStatus op_nc(DisasContext *s, DisasOps *o)
3304{
3305    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3306    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3307    tcg_temp_free_i32(l);
3308    set_cc_static(s);
3309    return NO_EXIT;
3310}
3311
3312static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3313{
3314    tcg_gen_neg_i64(o->out, o->in2);
3315    return NO_EXIT;
3316}
3317
3318static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3319{
3320    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3321    return NO_EXIT;
3322}
3323
3324static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3325{
3326    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3327    return NO_EXIT;
3328}
3329
3330static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3331{
3332    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3333    tcg_gen_mov_i64(o->out2, o->in2);
3334    return NO_EXIT;
3335}
3336
3337static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3338{
3339    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3340    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3341    tcg_temp_free_i32(l);
3342    set_cc_static(s);
3343    return NO_EXIT;
3344}
3345
3346static ExitStatus op_or(DisasContext *s, DisasOps *o)
3347{
3348    tcg_gen_or_i64(o->out, o->in1, o->in2);
3349    return NO_EXIT;
3350}
3351
3352static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3353{
3354    int shift = s->insn->data & 0xff;
3355    int size = s->insn->data >> 8;
3356    uint64_t mask = ((1ull << size) - 1) << shift;
3357
3358    assert(!o->g_in2);
3359    tcg_gen_shli_i64(o->in2, o->in2, shift);
3360    tcg_gen_or_i64(o->out, o->in1, o->in2);
3361
3362    /* Produce the CC from only the bits manipulated.  */
3363    tcg_gen_andi_i64(cc_dst, o->out, mask);
3364    set_cc_nz_u64(s, cc_dst);
3365    return NO_EXIT;
3366}
3367
3368static ExitStatus op_pack(DisasContext *s, DisasOps *o)
3369{
3370    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3371    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3372    tcg_temp_free_i32(l);
3373    return NO_EXIT;
3374}
3375
3376static ExitStatus op_pka(DisasContext *s, DisasOps *o)
3377{
3378    int l2 = get_field(s->fields, l2) + 1;
3379    TCGv_i32 l;
3380
3381    /* The length must not exceed 32 bytes.  */
3382    if (l2 > 32) {
3383        gen_program_exception(s, PGM_SPECIFICATION);
3384        return EXIT_NORETURN;
3385    }
3386    l = tcg_const_i32(l2);
3387    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3388    tcg_temp_free_i32(l);
3389    return NO_EXIT;
3390}
3391
3392static ExitStatus op_pku(DisasContext *s, DisasOps *o)
3393{
3394    int l2 = get_field(s->fields, l2) + 1;
3395    TCGv_i32 l;
3396
3397    /* The length must be even and must not exceed 64 bytes.  */
3398    if ((l2 & 1) || (l2 > 64)) {
3399        gen_program_exception(s, PGM_SPECIFICATION);
3400        return EXIT_NORETURN;
3401    }
3402    l = tcg_const_i32(l2);
3403    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3404    tcg_temp_free_i32(l);
3405    return NO_EXIT;
3406}
3407
3408static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3409{
3410    gen_helper_popcnt(o->out, o->in2);
3411    return NO_EXIT;
3412}
3413
3414#ifndef CONFIG_USER_ONLY
3415static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3416{
3417    check_privileged(s);
3418    gen_helper_ptlb(cpu_env);
3419    return NO_EXIT;
3420}
3421#endif
3422
3423static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3424{
3425    int i3 = get_field(s->fields, i3);
3426    int i4 = get_field(s->fields, i4);
3427    int i5 = get_field(s->fields, i5);
3428    int do_zero = i4 & 0x80;
3429    uint64_t mask, imask, pmask;
3430    int pos, len, rot;
3431
3432    /* Adjust the arguments for the specific insn.  */
3433    switch (s->fields->op2) {
3434    case 0x55: /* risbg */
3435    case 0x59: /* risbgn */
3436        i3 &= 63;
3437        i4 &= 63;
3438        pmask = ~0;
3439        break;
3440    case 0x5d: /* risbhg */
3441        i3 &= 31;
3442        i4 &= 31;
3443        pmask = 0xffffffff00000000ull;
3444        break;
3445    case 0x51: /* risblg */
3446        i3 &= 31;
3447        i4 &= 31;
3448        pmask = 0x00000000ffffffffull;
3449        break;
3450    default:
3451        g_assert_not_reached();
3452    }
3453
3454    /* MASK is the set of bits to be inserted from R2.
3455       Take care for I3/I4 wraparound.  */
3456    mask = pmask >> i3;
3457    if (i3 <= i4) {
3458        mask ^= pmask >> i4 >> 1;
3459    } else {
3460        mask |= ~(pmask >> i4 >> 1);
3461    }
3462    mask &= pmask;
3463
3464    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3465       insns, we need to keep the other half of the register.  */
3466    imask = ~mask | ~pmask;
3467    if (do_zero) {
3468        imask = ~pmask;
3469    }
3470
3471    len = i4 - i3 + 1;
3472    pos = 63 - i4;
3473    rot = i5 & 63;
3474    if (s->fields->op2 == 0x5d) {
3475        pos += 32;
3476    }
3477
3478    /* In some cases we can implement this with extract.  */
3479    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3480        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3481        return NO_EXIT;
3482    }
3483
3484    /* In some cases we can implement this with deposit.  */
3485    if (len > 0 && (imask == 0 || ~mask == imask)) {
3486        /* Note that we rotate the bits to be inserted to the lsb, not to
3487           the position as described in the PoO.  */
3488        rot = (rot - pos) & 63;
3489    } else {
3490        pos = -1;
3491    }
3492
3493    /* Rotate the input as necessary.  */
3494    tcg_gen_rotli_i64(o->in2, o->in2, rot);
3495
3496    /* Insert the selected bits into the output.  */
3497    if (pos >= 0) {
3498        if (imask == 0) {
3499            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3500        } else {
3501            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3502        }
3503    } else if (imask == 0) {
3504        tcg_gen_andi_i64(o->out, o->in2, mask);
3505    } else {
3506        tcg_gen_andi_i64(o->in2, o->in2, mask);
3507        tcg_gen_andi_i64(o->out, o->out, imask);
3508        tcg_gen_or_i64(o->out, o->out, o->in2);
3509    }
3510    return NO_EXIT;
3511}
3512
3513static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3514{
3515    int i3 = get_field(s->fields, i3);
3516    int i4 = get_field(s->fields, i4);
3517    int i5 = get_field(s->fields, i5);
3518    uint64_t mask;
3519
3520    /* If this is a test-only form, arrange to discard the result.  */
3521    if (i3 & 0x80) {
3522        o->out = tcg_temp_new_i64();
3523        o->g_out = false;
3524    }
3525
3526    i3 &= 63;
3527    i4 &= 63;
3528    i5 &= 63;
3529
3530    /* MASK is the set of bits to be operated on from R2.
3531       Take care for I3/I4 wraparound.  */
3532    mask = ~0ull >> i3;
3533    if (i3 <= i4) {
3534        mask ^= ~0ull >> i4 >> 1;
3535    } else {
3536        mask |= ~(~0ull >> i4 >> 1);
3537    }
3538
3539    /* Rotate the input as necessary.  */
3540    tcg_gen_rotli_i64(o->in2, o->in2, i5);
3541
3542    /* Operate.  */
3543    switch (s->fields->op2) {
3544    case 0x55: /* AND */
3545        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3546        tcg_gen_and_i64(o->out, o->out, o->in2);
3547        break;
3548    case 0x56: /* OR */
3549        tcg_gen_andi_i64(o->in2, o->in2, mask);
3550        tcg_gen_or_i64(o->out, o->out, o->in2);
3551        break;
3552    case 0x57: /* XOR */
3553        tcg_gen_andi_i64(o->in2, o->in2, mask);
3554        tcg_gen_xor_i64(o->out, o->out, o->in2);
3555        break;
3556    default:
3557        abort();
3558    }
3559
3560    /* Set the CC.  */
3561    tcg_gen_andi_i64(cc_dst, o->out, mask);
3562    set_cc_nz_u64(s, cc_dst);
3563    return NO_EXIT;
3564}
3565
3566static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3567{
3568    tcg_gen_bswap16_i64(o->out, o->in2);
3569    return NO_EXIT;
3570}
3571
3572static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3573{
3574    tcg_gen_bswap32_i64(o->out, o->in2);
3575    return NO_EXIT;
3576}
3577
3578static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3579{
3580    tcg_gen_bswap64_i64(o->out, o->in2);
3581    return NO_EXIT;
3582}
3583
3584static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3585{
3586    TCGv_i32 t1 = tcg_temp_new_i32();
3587    TCGv_i32 t2 = tcg_temp_new_i32();
3588    TCGv_i32 to = tcg_temp_new_i32();
3589    tcg_gen_extrl_i64_i32(t1, o->in1);
3590    tcg_gen_extrl_i64_i32(t2, o->in2);
3591    tcg_gen_rotl_i32(to, t1, t2);
3592    tcg_gen_extu_i32_i64(o->out, to);
3593    tcg_temp_free_i32(t1);
3594    tcg_temp_free_i32(t2);
3595    tcg_temp_free_i32(to);
3596    return NO_EXIT;
3597}
3598
3599static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3600{
3601    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3602    return NO_EXIT;
3603}
3604
3605#ifndef CONFIG_USER_ONLY
3606static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3607{
3608    check_privileged(s);
3609    gen_helper_rrbe(cc_op, cpu_env, o->in2);
3610    set_cc_static(s);
3611    return NO_EXIT;
3612}
3613
3614static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3615{
3616    check_privileged(s);
3617    gen_helper_sacf(cpu_env, o->in2);
3618    /* Addressing mode has changed, so end the block.  */
3619    return EXIT_PC_STALE;
3620}
3621#endif
3622
3623static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3624{
3625    int sam = s->insn->data;
3626    TCGv_i64 tsam;
3627    uint64_t mask;
3628
3629    switch (sam) {
3630    case 0:
3631        mask = 0xffffff;
3632        break;
3633    case 1:
3634        mask = 0x7fffffff;
3635        break;
3636    default:
3637        mask = -1;
3638        break;
3639    }
3640
3641    /* Bizarre but true, we check the address of the current insn for the
3642       specification exception, not the next to be executed.  Thus the PoO
3643       documents that Bad Things Happen two bytes before the end.  */
3644    if (s->pc & ~mask) {
3645        gen_program_exception(s, PGM_SPECIFICATION);
3646        return EXIT_NORETURN;
3647    }
3648    s->next_pc &= mask;
3649
3650    tsam = tcg_const_i64(sam);
3651    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3652    tcg_temp_free_i64(tsam);
3653
3654    /* Always exit the TB, since we (may have) changed execution mode.  */
3655    return EXIT_PC_STALE;
3656}
3657
3658static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3659{
3660    int r1 = get_field(s->fields, r1);
3661    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3662    return NO_EXIT;
3663}
3664
3665static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3666{
3667    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3668    return NO_EXIT;
3669}
3670
3671static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3672{
3673    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3674    return NO_EXIT;
3675}
3676
3677static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3678{
3679    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3680    return_low128(o->out2);
3681    return NO_EXIT;
3682}
3683
3684static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3685{
3686    gen_helper_sqeb(o->out, cpu_env, o->in2);
3687    return NO_EXIT;
3688}
3689
3690static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3691{
3692    gen_helper_sqdb(o->out, cpu_env, o->in2);
3693    return NO_EXIT;
3694}
3695
3696static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3697{
3698    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3699    return_low128(o->out2);
3700    return NO_EXIT;
3701}
3702
3703#ifndef CONFIG_USER_ONLY
3704static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3705{
3706    check_privileged(s);
3707    potential_page_fault(s);
3708    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3709    set_cc_static(s);
3710    return NO_EXIT;
3711}
3712
3713static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3714{
3715    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3716    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3717    check_privileged(s);
3718    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
3719    set_cc_static(s);
3720    tcg_temp_free_i32(r1);
3721    tcg_temp_free_i32(r3);
3722    return NO_EXIT;
3723}
3724#endif
3725
3726static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3727{
3728    DisasCompare c;
3729    TCGv_i64 a, h;
3730    TCGLabel *lab;
3731    int r1;
3732
3733    disas_jcc(s, &c, get_field(s->fields, m3));
3734
3735    /* We want to store when the condition is fulfilled, so branch
3736       out when it's not */
3737    c.cond = tcg_invert_cond(c.cond);
3738
3739    lab = gen_new_label();
3740    if (c.is_64) {
3741        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3742    } else {
3743        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3744    }
3745    free_compare(&c);
3746
3747    r1 = get_field(s->fields, r1);
3748    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3749    switch (s->insn->data) {
3750    case 1: /* STOCG */
3751        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3752        break;
3753    case 0: /* STOC */
3754        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3755        break;
3756    case 2: /* STOCFH */
3757        h = tcg_temp_new_i64();
3758        tcg_gen_shri_i64(h, regs[r1], 32);
3759        tcg_gen_qemu_st32(h, a, get_mem_index(s));
3760        tcg_temp_free_i64(h);
3761        break;
3762    default:
3763        g_assert_not_reached();
3764    }
3765    tcg_temp_free_i64(a);
3766
3767    gen_set_label(lab);
3768    return NO_EXIT;
3769}
3770
3771static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3772{
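        /* insn->data is 31 for the 32-bit forms and 63 for the 64-bit forms,
           so SIGN selects the sign bit of the shifted operand.  */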
3773    uint64_t sign = 1ull << s->insn->data;
3774    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3775    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3776    tcg_gen_shl_i64(o->out, o->in1, o->in2);
3777    /* The arithmetic left shift is curious in that it does not affect
3778       the sign bit.  Copy that over from the source unchanged.  */
3779    tcg_gen_andi_i64(o->out, o->out, ~sign);
3780    tcg_gen_andi_i64(o->in1, o->in1, sign);
3781    tcg_gen_or_i64(o->out, o->out, o->in1);
3782    return NO_EXIT;
3783}
3784
3785static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3786{
3787    tcg_gen_shl_i64(o->out, o->in1, o->in2);
3788    return NO_EXIT;
3789}
3790
3791static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3792{
3793    tcg_gen_sar_i64(o->out, o->in1, o->in2);
3794    return NO_EXIT;
3795}
3796
3797static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3798{
3799    tcg_gen_shr_i64(o->out, o->in1, o->in2);
3800    return NO_EXIT;
3801}
3802
3803static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3804{
3805    gen_helper_sfpc(cpu_env, o->in2);
3806    return NO_EXIT;
3807}
3808
3809static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3810{
3811    gen_helper_sfas(cpu_env, o->in2);
3812    return NO_EXIT;
3813}
3814
3815static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3816{
3817    int b2 = get_field(s->fields, b2);
3818    int d2 = get_field(s->fields, d2);
3819    TCGv_i64 t1 = tcg_temp_new_i64();
3820    TCGv_i64 t2 = tcg_temp_new_i64();
3821    int mask, pos, len;
3822
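        /* Pick the FPC field to modify: SRNM and SRNMB set the 2- or 3-bit
           BFP rounding mode in the low bits of the FPC, while SRNMT sets
           the 3-bit DFP rounding mode at bit position 4.  */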
3823    switch (s->fields->op2) {
3824    case 0x99: /* SRNM */
3825        pos = 0, len = 2;
3826        break;
3827    case 0xb8: /* SRNMB */
3828        pos = 0, len = 3;
3829        break;
3830    case 0xb9: /* SRNMT */
3831        pos = 4, len = 3;
3832        break;
3833    default:
3834        tcg_abort();
3835    }
3836    mask = (1 << len) - 1;
3837
3838    /* Insert the value into the appropriate field of the FPC.  */
3839    if (b2 == 0) {
3840        tcg_gen_movi_i64(t1, d2 & mask);
3841    } else {
3842        tcg_gen_addi_i64(t1, regs[b2], d2);
3843        tcg_gen_andi_i64(t1, t1, mask);
3844    }
3845    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3846    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3847    tcg_temp_free_i64(t1);
3848
3849    /* Then install the new FPC to set the rounding mode in fpu_status.  */
3850    gen_helper_sfpc(cpu_env, t2);
3851    tcg_temp_free_i64(t2);
3852    return NO_EXIT;
3853}
3854
3855static ExitStatus op_spm(DisasContext *s, DisasOps *o)
3856{
3857    tcg_gen_extrl_i64_i32(cc_op, o->in1);
3858    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
3859    set_cc_static(s);
3860
3861    tcg_gen_shri_i64(o->in1, o->in1, 24);
3862    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
3863    return NO_EXIT;
3864}
3865
3866#ifndef CONFIG_USER_ONLY
3867static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3868{
3869    check_privileged(s);
3870    tcg_gen_shri_i64(o->in2, o->in2, 4);
3871    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
3872    return NO_EXIT;
3873}
3874
3875static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3876{
3877    check_privileged(s);
3878    gen_helper_sske(cpu_env, o->in1, o->in2);
3879    return NO_EXIT;
3880}
3881
3882static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3883{
3884    check_privileged(s);
3885    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3886    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
3887    return EXIT_PC_STALE_NOCHAIN;
3888}
3889
3890static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3891{
3892    check_privileged(s);
3893    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
3894    return NO_EXIT;
3895}
3896
3897static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3898{
3899    gen_helper_stck(o->out, cpu_env);
3900    /* ??? We don't implement clock states.  */
3901    gen_op_movi_cc(s, 0);
3902    return NO_EXIT;
3903}
3904
3905static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3906{
3907    TCGv_i64 c1 = tcg_temp_new_i64();
3908    TCGv_i64 c2 = tcg_temp_new_i64();
3909    gen_helper_stck(c1, cpu_env);
3910    /* Shift the 64-bit value into its place as a zero-extended
3911       104-bit value.  Note that "bit positions 64-103 are always
3912       non-zero so that they compare differently to STCK"; we set
3913       the least significant bit to 1.  */
3914    tcg_gen_shli_i64(c2, c1, 56);
3915    tcg_gen_shri_i64(c1, c1, 8);
3916    tcg_gen_ori_i64(c2, c2, 0x10000);
3917    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3918    tcg_gen_addi_i64(o->in2, o->in2, 8);
3919    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3920    tcg_temp_free_i64(c1);
3921    tcg_temp_free_i64(c2);
3922    /* ??? We don't implement clock states.  */
3923    gen_op_movi_cc(s, 0);
3924    return NO_EXIT;
3925}
3926
3927static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3928{
3929    check_privileged(s);
3930    gen_helper_sckc(cpu_env, o->in2);
3931    return NO_EXIT;
3932}
3933
3934static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3935{
3936    check_privileged(s);
3937    gen_helper_stckc(o->out, cpu_env);
3938    return NO_EXIT;
3939}
3940
3941static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3942{
3943    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3944    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3945    check_privileged(s);
3946    gen_helper_stctg(cpu_env, r1, o->in2, r3);
3947    tcg_temp_free_i32(r1);
3948    tcg_temp_free_i32(r3);
3949    return NO_EXIT;
3950}
3951
3952static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3953{
3954    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3955    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3956    check_privileged(s);
3957    gen_helper_stctl(cpu_env, r1, o->in2, r3);
3958    tcg_temp_free_i32(r1);
3959    tcg_temp_free_i32(r3);
3960    return NO_EXIT;
3961}
3962
3963static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3964{
3965    check_privileged(s);
3966    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
3967    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
3968    return NO_EXIT;
3969}
3970
3971static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3972{
3973    check_privileged(s);
3974    gen_helper_spt(cpu_env, o->in2);
3975    return NO_EXIT;
3976}
3977
3978static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3979{
3980    check_privileged(s);
3981    gen_helper_stfl(cpu_env);
3982    return NO_EXIT;
3983}
3984
3985static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3986{
3987    check_privileged(s);
3988    gen_helper_stpt(o->out, cpu_env);
3989    return NO_EXIT;
3990}
3991
3992static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3993{
3994    check_privileged(s);
3995    potential_page_fault(s);
3996    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3997    set_cc_static(s);
3998    return NO_EXIT;
3999}
4000
4001static ExitStatus op_spx(DisasContext *s, DisasOps *o)
4002{
4003    check_privileged(s);
4004    gen_helper_spx(cpu_env, o->in2);
4005    return NO_EXIT;
4006}
4007
4008static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
4009{
4010    check_privileged(s);
4011    potential_page_fault(s);
4012    gen_helper_xsch(cpu_env, regs[1]);
4013    set_cc_static(s);
4014    return NO_EXIT;
4015}
4016
4017static ExitStatus op_csch(DisasContext *s, DisasOps *o)
4018{
4019    check_privileged(s);
4020    potential_page_fault(s);
4021    gen_helper_csch(cpu_env, regs[1]);
4022    set_cc_static(s);
4023    return NO_EXIT;
4024}
4025
4026static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
4027{
4028    check_privileged(s);
4029    potential_page_fault(s);
4030    gen_helper_hsch(cpu_env, regs[1]);
4031    set_cc_static(s);
4032    return NO_EXIT;
4033}
4034
4035static ExitStatus op_msch(DisasContext *s, DisasOps *o)
4036{
4037    check_privileged(s);
4038    potential_page_fault(s);
4039    gen_helper_msch(cpu_env, regs[1], o->in2);
4040    set_cc_static(s);
4041    return NO_EXIT;
4042}
4043
4044static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
4045{
4046    check_privileged(s);
4047    potential_page_fault(s);
4048    gen_helper_rchp(cpu_env, regs[1]);
4049    set_cc_static(s);
4050    return NO_EXIT;
4051}
4052
4053static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
4054{
4055    check_privileged(s);
4056    potential_page_fault(s);
4057    gen_helper_rsch(cpu_env, regs[1]);
4058    set_cc_static(s);
4059    return NO_EXIT;
4060}
4061
4062static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
4063{
4064    check_privileged(s);
4065    potential_page_fault(s);
4066    gen_helper_ssch(cpu_env, regs[1], o->in2);
4067    set_cc_static(s);
4068    return NO_EXIT;
4069}
4070
4071static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
4072{
4073    check_privileged(s);
4074    potential_page_fault(s);
4075    gen_helper_stsch(cpu_env, regs[1], o->in2);
4076    set_cc_static(s);
4077    return NO_EXIT;
4078}
4079
4080static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
4081{
4082    check_privileged(s);
4083    potential_page_fault(s);
4084    gen_helper_tsch(cpu_env, regs[1], o->in2);
4085    set_cc_static(s);
4086    return NO_EXIT;
4087}
4088
4089static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
4090{
4091    check_privileged(s);
4092    potential_page_fault(s);
4093    gen_helper_chsc(cpu_env, o->in2);
4094    set_cc_static(s);
4095    return NO_EXIT;
4096}
4097
4098static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
4099{
4100    check_privileged(s);
4101    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4102    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4103    return NO_EXIT;
4104}
4105
4106static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
4107{
4108    uint64_t i2 = get_field(s->fields, i2);
4109    TCGv_i64 t;
4110
4111    check_privileged(s);
4112
4113    /* It is important to do what the instruction name says: STORE THEN.
4114       If we let the output hook perform the store and we then fault and
4115       restart, we would have the wrong SYSTEM MASK in place.  */
4116    t = tcg_temp_new_i64();
4117    tcg_gen_shri_i64(t, psw_mask, 56);
4118    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4119    tcg_temp_free_i64(t);
4120
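        /* Opcode 0xac is STORE THEN AND SYSTEM MASK (clear the selected
           bits); the other encoding handled here is STORE THEN OR.  */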
4121    if (s->fields->op == 0xac) {
4122        tcg_gen_andi_i64(psw_mask, psw_mask,
4123                         (i2 << 56) | 0x00ffffffffffffffull);
4124    } else {
4125        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4126    }
4127
4128    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4129    return EXIT_PC_STALE_NOCHAIN;
4130}
4131
4132static ExitStatus op_stura(DisasContext *s, DisasOps *o)
4133{
4134    check_privileged(s);
4135    gen_helper_stura(cpu_env, o->in2, o->in1);
4136    return NO_EXIT;
4137}
4138
4139static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
4140{
4141    check_privileged(s);
4142    gen_helper_sturg(cpu_env, o->in2, o->in1);
4143    return NO_EXIT;
4144}
4145#endif
4146
4147static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
4148{
4149    gen_helper_stfle(cc_op, cpu_env, o->in2);
4150    set_cc_static(s);
4151    return NO_EXIT;
4152}
4153
4154static ExitStatus op_st8(DisasContext *s, DisasOps *o)
4155{
4156    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4157    return NO_EXIT;
4158}
4159
4160static ExitStatus op_st16(DisasContext *s, DisasOps *o)
4161{
4162    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4163    return NO_EXIT;
4164}
4165
4166static ExitStatus op_st32(DisasContext *s, DisasOps *o)
4167{
4168    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4169    return NO_EXIT;
4170}
4171
4172static ExitStatus op_st64(DisasContext *s, DisasOps *o)
4173{
4174    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4175    return NO_EXIT;
4176}
4177
4178static ExitStatus op_stam(DisasContext *s, DisasOps *o)
4179{
4180    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4181    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4182    gen_helper_stam(cpu_env, r1, o->in2, r3);
4183    tcg_temp_free_i32(r1);
4184    tcg_temp_free_i32(r3);
4185    return NO_EXIT;
4186}
4187
4188static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
4189{
4190    int m3 = get_field(s->fields, m3);
4191    int pos, base = s->insn->data;
4192    TCGv_i64 tmp = tcg_temp_new_i64();
4193
4194    pos = base + ctz32(m3) * 8;
4195    switch (m3) {
4196    case 0xf:
4197        /* Effectively a 32-bit store.  */
4198        tcg_gen_shri_i64(tmp, o->in1, pos);
4199        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4200        break;
4201
4202    case 0xc:
4203    case 0x6:
4204    case 0x3:
4205        /* Effectively a 16-bit store.  */
4206        tcg_gen_shri_i64(tmp, o->in1, pos);
4207        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4208        break;
4209
4210    case 0x8:
4211    case 0x4:
4212    case 0x2:
4213    case 0x1:
4214        /* Effectively an 8-bit store.  */
4215        tcg_gen_shri_i64(tmp, o->in1, pos);
4216        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4217        break;
4218
4219    default:
4220        /* This is going to be a sequence of shifts and stores.  */
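            /* Walk M3 from its most significant bit down; each set bit
               stores one byte of R1 to the next consecutive address.
               E.g. m3 == 0xa stores the first and third bytes of the
               field to two adjacent bytes of storage.  */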
4221        pos = base + 32 - 8;
4222        while (m3) {
4223            if (m3 & 0x8) {
4224                tcg_gen_shri_i64(tmp, o->in1, pos);
4225                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4226                tcg_gen_addi_i64(o->in2, o->in2, 1);
4227            }
4228            m3 = (m3 << 1) & 0xf;
4229            pos -= 8;
4230        }
4231        break;
4232    }
4233    tcg_temp_free_i64(tmp);
4234    return NO_EXIT;
4235}
4236
4237static ExitStatus op_stm(DisasContext *s, DisasOps *o)
4238{
4239    int r1 = get_field(s->fields, r1);
4240    int r3 = get_field(s->fields, r3);
4241    int size = s->insn->data;
4242    TCGv_i64 tsize = tcg_const_i64(size);
4243
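        /* Store R1, R1+1, ... up to and including R3; the register number
           wraps from 15 back to 0 when R3 < R1, as the architecture
           specifies for STM/STMG register ranges.  */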
4244    while (1) {
4245        if (size == 8) {
4246            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4247        } else {
4248            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4249        }
4250        if (r1 == r3) {
4251            break;
4252        }
4253        tcg_gen_add_i64(o->in2, o->in2, tsize);
4254        r1 = (r1 + 1) & 15;
4255    }
4256
4257    tcg_temp_free_i64(tsize);
4258    return NO_EXIT;
4259}
4260
4261static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
4262{
4263    int r1 = get_field(s->fields, r1);
4264    int r3 = get_field(s->fields, r3);
4265    TCGv_i64 t = tcg_temp_new_i64();
4266    TCGv_i64 t4 = tcg_const_i64(4);
4267    TCGv_i64 t32 = tcg_const_i64(32);
4268
4269    while (1) {
4270        tcg_gen_shl_i64(t, regs[r1], t32);
4271        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4272        if (r1 == r3) {
4273            break;
4274        }
4275        tcg_gen_add_i64(o->in2, o->in2, t4);
4276        r1 = (r1 + 1) & 15;
4277    }
4278
4279    tcg_temp_free_i64(t);
4280    tcg_temp_free_i64(t4);
4281    tcg_temp_free_i64(t32);
4282    return NO_EXIT;
4283}
4284
4285static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
4286{
4287    if (tb_cflags(s->tb) & CF_PARALLEL) {
4288        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4289    } else {
4290        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4291    }
4292    return NO_EXIT;
4293}
4294
4295static ExitStatus op_srst(DisasContext *s, DisasOps *o)
4296{
4297    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4298    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4299
4300    gen_helper_srst(cpu_env, r1, r2);
4301
4302    tcg_temp_free_i32(r1);
4303    tcg_temp_free_i32(r2);
4304    set_cc_static(s);
4305    return NO_EXIT;
4306}
4307
4308static ExitStatus op_srstu(DisasContext *s, DisasOps *o)
4309{
4310    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4311    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4312
4313    gen_helper_srstu(cpu_env, r1, r2);
4314
4315    tcg_temp_free_i32(r1);
4316    tcg_temp_free_i32(r2);
4317    set_cc_static(s);
4318    return NO_EXIT;
4319}
4320
4321static ExitStatus op_sub(DisasContext *s, DisasOps *o)
4322{
4323    tcg_gen_sub_i64(o->out, o->in1, o->in2);
4324    return NO_EXIT;
4325}
4326
4327static ExitStatus op_subb(DisasContext *s, DisasOps *o)
4328{
4329    DisasCompare cmp;
4330    TCGv_i64 borrow;
4331
4332    tcg_gen_sub_i64(o->out, o->in1, o->in2);
4333
4334    /* The !borrow flag is the msb of CC.  Since we want the inverse of
4335       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
4336    disas_jcc(s, &cmp, 8 | 4);
4337    borrow = tcg_temp_new_i64();
4338    if (cmp.is_64) {
4339        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4340    } else {
4341        TCGv_i32 t = tcg_temp_new_i32();
4342        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4343        tcg_gen_extu_i32_i64(borrow, t);
4344        tcg_temp_free_i32(t);
4345    }
4346    free_compare(&cmp);
4347
4348    tcg_gen_sub_i64(o->out, o->out, borrow);
4349    tcg_temp_free_i64(borrow);
4350    return NO_EXIT;
4351}
4352
4353static ExitStatus op_svc(DisasContext *s, DisasOps *o)
4354{
4355    TCGv_i32 t;
4356
4357    update_psw_addr(s);
4358    update_cc_op(s);
4359
4360    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4361    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4362    tcg_temp_free_i32(t);
4363
4364    t = tcg_const_i32(s->ilen);
4365    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4366    tcg_temp_free_i32(t);
4367
4368    gen_exception(EXCP_SVC);
4369    return EXIT_NORETURN;
4370}
4371
4372static ExitStatus op_tam(DisasContext *s, DisasOps *o)
4373{
4374    int cc = 0;
4375
4376    cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
4377    cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
4378    gen_op_movi_cc(s, cc);
4379    return NO_EXIT;
4380}
4381
4382static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
4383{
4384    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4385    set_cc_static(s);
4386    return NO_EXIT;
4387}
4388
4389static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
4390{
4391    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4392    set_cc_static(s);
4393    return NO_EXIT;
4394}
4395
4396static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
4397{
4398    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4399    set_cc_static(s);
4400    return NO_EXIT;
4401}
4402
4403#ifndef CONFIG_USER_ONLY
4404
4405static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
4406{
4407    check_privileged(s);
4408    gen_helper_testblock(cc_op, cpu_env, o->in2);
4409    set_cc_static(s);
4410    return NO_EXIT;
4411}
4412
4413static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
4414{
4415    gen_helper_tprot(cc_op, o->addr1, o->in2);
4416    set_cc_static(s);
4417    return NO_EXIT;
4418}
4419
4420#endif
4421
4422static ExitStatus op_tp(DisasContext *s, DisasOps *o)
4423{
4424    TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4425    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4426    tcg_temp_free_i32(l1);
4427    set_cc_static(s);
4428    return NO_EXIT;
4429}
4430
4431static ExitStatus op_tr(DisasContext *s, DisasOps *o)
4432{
4433    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4434    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4435    tcg_temp_free_i32(l);
4436    set_cc_static(s);
4437    return NO_EXIT;
4438}
4439
4440static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4441{
4442    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4443    return_low128(o->out2);
4444    set_cc_static(s);
4445    return NO_EXIT;
4446}
4447
4448static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4449{
4450    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4451    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4452    tcg_temp_free_i32(l);
4453    set_cc_static(s);
4454    return NO_EXIT;
4455}
4456
4457static ExitStatus op_trtr(DisasContext *s, DisasOps *o)
4458{
4459    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4460    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4461    tcg_temp_free_i32(l);
4462    set_cc_static(s);
4463    return NO_EXIT;
4464}
4465
4466static ExitStatus op_trXX(DisasContext *s, DisasOps *o)
4467{
4468    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4469    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4470    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4471    TCGv_i32 tst = tcg_temp_new_i32();
4472    int m3 = get_field(s->fields, m3);
4473
4474    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4475        m3 = 0;
4476    }
4477    if (m3 & 1) {
4478        tcg_gen_movi_i32(tst, -1);
4479    } else {
4480        tcg_gen_extrl_i64_i32(tst, regs[0]);
4481        if (s->insn->opc & 3) {
4482            tcg_gen_ext8u_i32(tst, tst);
4483        } else {
4484            tcg_gen_ext16u_i32(tst, tst);
4485        }
4486    }
4487    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4488
4489    tcg_temp_free_i32(r1);
4490    tcg_temp_free_i32(r2);
4491    tcg_temp_free_i32(sizes);
4492    tcg_temp_free_i32(tst);
4493    set_cc_static(s);
4494    return NO_EXIT;
4495}
4496
4497static ExitStatus op_ts(DisasContext *s, DisasOps *o)
4498{
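        /* TEST AND SET: atomically exchange the byte with all ones and
           set the CC from the leftmost bit of the old value.  */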
4499    TCGv_i32 t1 = tcg_const_i32(0xff);
4500    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4501    tcg_gen_extract_i32(cc_op, t1, 7, 1);
4502    tcg_temp_free_i32(t1);
4503    set_cc_static(s);
4504    return NO_EXIT;
4505}
4506
4507static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4508{
4509    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4510    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4511    tcg_temp_free_i32(l);
4512    return NO_EXIT;
4513}
4514
4515static ExitStatus op_unpka(DisasContext *s, DisasOps *o)
4516{
4517    int l1 = get_field(s->fields, l1) + 1;
4518    TCGv_i32 l;
4519
4520    /* The length must not exceed 32 bytes.  */
4521    if (l1 > 32) {
4522        gen_program_exception(s, PGM_SPECIFICATION);
4523        return EXIT_NORETURN;
4524    }
4525    l = tcg_const_i32(l1);
4526    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4527    tcg_temp_free_i32(l);
4528    set_cc_static(s);
4529    return NO_EXIT;
4530}
4531
4532static ExitStatus op_unpku(DisasContext *s, DisasOps *o)
4533{
4534    int l1 = get_field(s->fields, l1) + 1;
4535    TCGv_i32 l;
4536
4537    /* The length must be even and must not exceed 64 bytes.  */
4538    if ((l1 & 1) || (l1 > 64)) {
4539        gen_program_exception(s, PGM_SPECIFICATION);
4540        return EXIT_NORETURN;
4541    }
4542    l = tcg_const_i32(l1);
4543    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4544    tcg_temp_free_i32(l);
4545    set_cc_static(s);
4546    return NO_EXIT;
4547}
4548
4549
4550static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4551{
4552    int d1 = get_field(s->fields, d1);
4553    int d2 = get_field(s->fields, d2);
4554    int b1 = get_field(s->fields, b1);
4555    int b2 = get_field(s->fields, b2);
4556    int l = get_field(s->fields, l1);
4557    TCGv_i32 t32;
4558
4559    o->addr1 = get_address(s, 0, b1, d1);
4560
4561    /* If the addresses are identical, this is a store/memset of zero.  */
4562    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4563        o->in2 = tcg_const_i64(0);
4564
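            /* Clear with 8-byte stores while possible, then at most one
               4-, 2- and 1-byte store; e.g. a 13-byte clear emits one
               8-byte, one 4-byte and one 1-byte store.  */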
4565        l++;
4566        while (l >= 8) {
4567            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4568            l -= 8;
4569            if (l > 0) {
4570                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4571            }
4572        }
4573        if (l >= 4) {
4574            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4575            l -= 4;
4576            if (l > 0) {
4577                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4578            }
4579        }
4580        if (l >= 2) {
4581            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4582            l -= 2;
4583            if (l > 0) {
4584                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4585            }
4586        }
4587        if (l) {
4588            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4589        }
4590        gen_op_movi_cc(s, 0);
4591        return NO_EXIT;
4592    }
4593
4594    /* But in general we'll defer to a helper.  */
4595    o->in2 = get_address(s, 0, b2, d2);
4596    t32 = tcg_const_i32(l);
4597    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4598    tcg_temp_free_i32(t32);
4599    set_cc_static(s);
4600    return NO_EXIT;
4601}
4602
4603static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4604{
4605    tcg_gen_xor_i64(o->out, o->in1, o->in2);
4606    return NO_EXIT;
4607}
4608
4609static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4610{
4611    int shift = s->insn->data & 0xff;
4612    int size = s->insn->data >> 8;
4613    uint64_t mask = ((1ull << size) - 1) << shift;
4614
4615    assert(!o->g_in2);
4616    tcg_gen_shli_i64(o->in2, o->in2, shift);
4617    tcg_gen_xor_i64(o->out, o->in1, o->in2);
4618
4619    /* Produce the CC from only the bits manipulated.  */
4620    tcg_gen_andi_i64(cc_dst, o->out, mask);
4621    set_cc_nz_u64(s, cc_dst);
4622    return NO_EXIT;
4623}
4624
4625static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4626{
4627    o->out = tcg_const_i64(0);
4628    return NO_EXIT;
4629}
4630
4631static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4632{
4633    o->out = tcg_const_i64(0);
4634    o->out2 = o->out;
4635    o->g_out2 = true;
4636    return NO_EXIT;
4637}
4638
4639/* ====================================================================== */
4640/* The "Cc OUTput" generators.  Given the generated output (and in some cases
4641   the original inputs), update the various cc data structures in order to
4642   be able to compute the new condition code.  */
4643
4644static void cout_abs32(DisasContext *s, DisasOps *o)
4645{
4646    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4647}
4648
4649static void cout_abs64(DisasContext *s, DisasOps *o)
4650{
4651    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4652}
4653
4654static void cout_adds32(DisasContext *s, DisasOps *o)
4655{
4656    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4657}
4658
4659static void cout_adds64(DisasContext *s, DisasOps *o)
4660{
4661    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4662}
4663
4664static void cout_addu32(DisasContext *s, DisasOps *o)
4665{
4666    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4667}
4668
4669static void cout_addu64(DisasContext *s, DisasOps *o)
4670{
4671    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4672}
4673
4674static void cout_addc32(DisasContext *s, DisasOps *o)
4675{
4676    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4677}
4678
4679static void cout_addc64(DisasContext *s, DisasOps *o)
4680{
4681    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4682}
4683
4684static void cout_cmps32(DisasContext *s, DisasOps *o)
4685{
4686    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4687}
4688
4689static void cout_cmps64(DisasContext *s, DisasOps *o)
4690{
4691    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4692}
4693
4694static void cout_cmpu32(DisasContext *s, DisasOps *o)
4695{
4696    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4697}
4698
4699static void cout_cmpu64(DisasContext *s, DisasOps *o)
4700{
4701    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4702}
4703
4704static void cout_f32(DisasContext *s, DisasOps *o)
4705{
4706    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4707}
4708
4709static void cout_f64(DisasContext *s, DisasOps *o)
4710{
4711    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4712}
4713
4714static void cout_f128(DisasContext *s, DisasOps *o)
4715{
4716    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4717}
4718
4719static void cout_nabs32(DisasContext *s, DisasOps *o)
4720{
4721    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4722}
4723
4724static void cout_nabs64(DisasContext *s, DisasOps *o)
4725{
4726    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4727}
4728
4729static void cout_neg32(DisasContext *s, DisasOps *o)
4730{
4731    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4732}
4733
4734static void cout_neg64(DisasContext *s, DisasOps *o)
4735{
4736    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4737}
4738
4739static void cout_nz32(DisasContext *s, DisasOps *o)
4740{
4741    tcg_gen_ext32u_i64(cc_dst, o->out);
4742    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4743}
4744
4745static void cout_nz64(DisasContext *s, DisasOps *o)
4746{
4747    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4748}
4749
4750static void cout_s32(DisasContext *s, DisasOps *o)
4751{
4752    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4753}
4754
4755static void cout_s64(DisasContext *s, DisasOps *o)
4756{
4757    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4758}
4759
4760static void cout_subs32(DisasContext *s, DisasOps *o)
4761{
4762    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4763}
4764
4765static void cout_subs64(DisasContext *s, DisasOps *o)
4766{
4767    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4768}
4769
4770static void cout_subu32(DisasContext *s, DisasOps *o)
4771{
4772    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4773}
4774
4775static void cout_subu64(DisasContext *s, DisasOps *o)
4776{
4777    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4778}
4779
4780static void cout_subb32(DisasContext *s, DisasOps *o)
4781{
4782    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4783}
4784
4785static void cout_subb64(DisasContext *s, DisasOps *o)
4786{
4787    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4788}
4789
4790static void cout_tm32(DisasContext *s, DisasOps *o)
4791{
4792    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4793}
4794
4795static void cout_tm64(DisasContext *s, DisasOps *o)
4796{
4797    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4798}
4799
4800/* ====================================================================== */
4801/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
4802   with the TCG register to which we will write.  Used in combination with
4803   the "wout" generators, in some cases we need a new temporary, and in
4804   some cases we can write to a TCG global.  */
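    /* Each prep/wout/in1/in2 helper has a matching SPEC_* constant naming
       any operand constraints (e.g. SPEC_r1_even for an even register pair).
       The D() macro below ORs these into the insn's .spec word, so invalid
       operand encodings can be rejected before any helper runs.  */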
4805
4806static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4807{
4808    o->out = tcg_temp_new_i64();
4809}
4810#define SPEC_prep_new 0
4811
4812static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4813{
4814    o->out = tcg_temp_new_i64();
4815    o->out2 = tcg_temp_new_i64();
4816}
4817#define SPEC_prep_new_P 0
4818
4819static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4820{
4821    o->out = regs[get_field(f, r1)];
4822    o->g_out = true;
4823}
4824#define SPEC_prep_r1 0
4825
4826static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4827{
4828    int r1 = get_field(f, r1);
4829    o->out = regs[r1];
4830    o->out2 = regs[r1 + 1];
4831    o->g_out = o->g_out2 = true;
4832}
4833#define SPEC_prep_r1_P SPEC_r1_even
4834
4835static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4836{
4837    o->out = fregs[get_field(f, r1)];
4838    o->g_out = true;
4839}
4840#define SPEC_prep_f1 0
4841
4842static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4843{
4844    int r1 = get_field(f, r1);
4845    o->out = fregs[r1];
4846    o->out2 = fregs[r1 + 2];
4847    o->g_out = o->g_out2 = true;
4848}
4849#define SPEC_prep_x1 SPEC_r1_f128
4850
4851/* ====================================================================== */
4852/* The "Write OUTput" generators.  These generally perform some non-trivial
4853   copy of data to TCG globals, or to main memory.  The trivial cases are
4854   generally handled by having a "prep" generator install the TCG global
4855   as the destination of the operation.  */
4856
4857static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4858{
4859    store_reg(get_field(f, r1), o->out);
4860}
4861#define SPEC_wout_r1 0
4862
4863static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4864{
4865    int r1 = get_field(f, r1);
4866    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4867}
4868#define SPEC_wout_r1_8 0
4869
4870static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4871{
4872    int r1 = get_field(f, r1);
4873    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4874}
4875#define SPEC_wout_r1_16 0
4876
4877static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4878{
4879    store_reg32_i64(get_field(f, r1), o->out);
4880}
4881#define SPEC_wout_r1_32 0
4882
4883static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
4884{
4885    store_reg32h_i64(get_field(f, r1), o->out);
4886}
4887#define SPEC_wout_r1_32h 0
4888
4889static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4890{
4891    int r1 = get_field(f, r1);
4892    store_reg32_i64(r1, o->out);
4893    store_reg32_i64(r1 + 1, o->out2);
4894}
4895#define SPEC_wout_r1_P32 SPEC_r1_even
4896
4897static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4898{
4899    int r1 = get_field(f, r1);
4900    store_reg32_i64(r1 + 1, o->out);
4901    tcg_gen_shri_i64(o->out, o->out, 32);
4902    store_reg32_i64(r1, o->out);
4903}
4904#define SPEC_wout_r1_D32 SPEC_r1_even
4905
4906static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4907{
4908    int r3 = get_field(f, r3);
4909    store_reg32_i64(r3, o->out);
4910    store_reg32_i64(r3 + 1, o->out2);
4911}
4912#define SPEC_wout_r3_P32 SPEC_r3_even
4913
4914static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
4915{
4916    int r3 = get_field(f, r3);
4917    store_reg(r3, o->out);
4918    store_reg(r3 + 1, o->out2);
4919}
4920#define SPEC_wout_r3_P64 SPEC_r3_even
4921
4922static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4923{
4924    store_freg32_i64(get_field(f, r1), o->out);
4925}
4926#define SPEC_wout_e1 0
4927
4928static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4929{
4930    store_freg(get_field(f, r1), o->out);
4931}
4932#define SPEC_wout_f1 0
4933
4934static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4935{
4936    int f1 = get_field(s->fields, r1);
4937    store_freg(f1, o->out);
4938    store_freg(f1 + 2, o->out2);
4939}
4940#define SPEC_wout_x1 SPEC_r1_f128
4941
4942static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4943{
4944    if (get_field(f, r1) != get_field(f, r2)) {
4945        store_reg32_i64(get_field(f, r1), o->out);
4946    }
4947}
4948#define SPEC_wout_cond_r1r2_32 0
4949
4950static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4951{
4952    if (get_field(f, r1) != get_field(f, r2)) {
4953        store_freg32_i64(get_field(f, r1), o->out);
4954    }
4955}
4956#define SPEC_wout_cond_e1e2 0
4957
4958static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4959{
4960    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4961}
4962#define SPEC_wout_m1_8 0
4963
4964static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4965{
4966    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4967}
4968#define SPEC_wout_m1_16 0
4969
4970static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4971{
4972    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4973}
4974#define SPEC_wout_m1_32 0
4975
4976static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4977{
4978    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4979}
4980#define SPEC_wout_m1_64 0
4981
4982static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4983{
4984    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4985}
4986#define SPEC_wout_m2_32 0
4987
4988static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4989{
4990    store_reg(get_field(f, r1), o->in2);
4991}
4992#define SPEC_wout_in2_r1 0
4993
4994static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4995{
4996    store_reg32_i64(get_field(f, r1), o->in2);
4997}
4998#define SPEC_wout_in2_r1_32 0
4999
5000/* ====================================================================== */
5001/* The "INput 1" generators.  These load the first operand to an insn.  */
5002
5003static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5004{
5005    o->in1 = load_reg(get_field(f, r1));
5006}
5007#define SPEC_in1_r1 0
5008
5009static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5010{
5011    o->in1 = regs[get_field(f, r1)];
5012    o->g_in1 = true;
5013}
5014#define SPEC_in1_r1_o 0
5015
5016static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5017{
5018    o->in1 = tcg_temp_new_i64();
5019    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
5020}
5021#define SPEC_in1_r1_32s 0
5022
5023static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5024{
5025    o->in1 = tcg_temp_new_i64();
5026    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
5027}
5028#define SPEC_in1_r1_32u 0
5029
5030static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5031{
5032    o->in1 = tcg_temp_new_i64();
5033    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
5034}
5035#define SPEC_in1_r1_sr32 0
5036
5037static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
5038{
5039    o->in1 = load_reg(get_field(f, r1) + 1);
5040}
5041#define SPEC_in1_r1p1 SPEC_r1_even
5042
5043static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5044{
5045    o->in1 = tcg_temp_new_i64();
5046    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
5047}
5048#define SPEC_in1_r1p1_32s SPEC_r1_even
5049
5050static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5051{
5052    o->in1 = tcg_temp_new_i64();
5053    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
5054}
5055#define SPEC_in1_r1p1_32u SPEC_r1_even
5056
5057static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5058{
5059    int r1 = get_field(f, r1);
5060    o->in1 = tcg_temp_new_i64();
5061    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5062}
5063#define SPEC_in1_r1_D32 SPEC_r1_even
5064
5065static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5066{
5067    o->in1 = load_reg(get_field(f, r2));
5068}
5069#define SPEC_in1_r2 0
5070
5071static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5072{
5073    o->in1 = tcg_temp_new_i64();
5074    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
5075}
5076#define SPEC_in1_r2_sr32 0
5077
5078static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5079{
5080    o->in1 = load_reg(get_field(f, r3));
5081}
5082#define SPEC_in1_r3 0
5083
5084static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5085{
5086    o->in1 = regs[get_field(f, r3)];
5087    o->g_in1 = true;
5088}
5089#define SPEC_in1_r3_o 0
5090
5091static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5092{
5093    o->in1 = tcg_temp_new_i64();
5094    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
5095}
5096#define SPEC_in1_r3_32s 0
5097
5098static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5099{
5100    o->in1 = tcg_temp_new_i64();
5101    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
5102}
5103#define SPEC_in1_r3_32u 0
5104
5105static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5106{
5107    int r3 = get_field(f, r3);
5108    o->in1 = tcg_temp_new_i64();
5109    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5110}
5111#define SPEC_in1_r3_D32 SPEC_r3_even
5112
5113static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5114{
5115    o->in1 = load_freg32_i64(get_field(f, r1));
5116}
5117#define SPEC_in1_e1 0
5118
5119static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5120{
5121    o->in1 = fregs[get_field(f, r1)];
5122    o->g_in1 = true;
5123}
5124#define SPEC_in1_f1_o 0
5125
5126static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5127{
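        /* Note that the 128-bit first operand is deliberately loaded into
           out/out2 rather than in1/in2: the f128 ops receive it there and
           take the second operand via in1/in2 (cf. in2_x2_o below).  */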
5128    int r1 = get_field(f, r1);
5129    o->out = fregs[r1];
5130    o->out2 = fregs[r1 + 2];
5131    o->g_out = o->g_out2 = true;
5132}
5133#define SPEC_in1_x1_o SPEC_r1_f128
5134
5135static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5136{
5137    o->in1 = fregs[get_field(f, r3)];
5138    o->g_in1 = true;
5139}
5140#define SPEC_in1_f3_o 0
5141
5142static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
5143{
5144    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
5145}
5146#define SPEC_in1_la1 0
5147
5148static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
5149{
5150    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5151    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5152}
5153#define SPEC_in1_la2 0
5154
5155static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5156{
5157    in1_la1(s, f, o);
5158    o->in1 = tcg_temp_new_i64();
5159    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5160}
5161#define SPEC_in1_m1_8u 0
5162
5163static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5164{
5165    in1_la1(s, f, o);
5166    o->in1 = tcg_temp_new_i64();
5167    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5168}
5169#define SPEC_in1_m1_16s 0
5170
5171static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5172{
5173    in1_la1(s, f, o);
5174    o->in1 = tcg_temp_new_i64();
5175    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5176}
5177#define SPEC_in1_m1_16u 0
5178
5179static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5180{
5181    in1_la1(s, f, o);
5182    o->in1 = tcg_temp_new_i64();
5183    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5184}
5185#define SPEC_in1_m1_32s 0
5186
5187static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5188{
5189    in1_la1(s, f, o);
5190    o->in1 = tcg_temp_new_i64();
5191    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5192}
5193#define SPEC_in1_m1_32u 0
5194
5195static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5196{
5197    in1_la1(s, f, o);
5198    o->in1 = tcg_temp_new_i64();
5199    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5200}
5201#define SPEC_in1_m1_64 0
5202
5203/* ====================================================================== */
5204/* The "INput 2" generators.  These load the second operand to an insn.  */
5205
5206static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5207{
5208    o->in2 = regs[get_field(f, r1)];
5209    o->g_in2 = true;
5210}
5211#define SPEC_in2_r1_o 0
5212
5213static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5214{
5215    o->in2 = tcg_temp_new_i64();
5216    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
5217}
5218#define SPEC_in2_r1_16u 0
5219
5220static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5221{
5222    o->in2 = tcg_temp_new_i64();
5223    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
5224}
5225#define SPEC_in2_r1_32u 0
5226
5227static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5228{
5229    int r1 = get_field(f, r1);
5230    o->in2 = tcg_temp_new_i64();
5231    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5232}
5233#define SPEC_in2_r1_D32 SPEC_r1_even
5234
5235static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5236{
5237    o->in2 = load_reg(get_field(f, r2));
5238}
5239#define SPEC_in2_r2 0
5240
5241static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5242{
5243    o->in2 = regs[get_field(f, r2)];
5244    o->g_in2 = true;
5245}
5246#define SPEC_in2_r2_o 0
5247
5248static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
5249{
5250    int r2 = get_field(f, r2);
5251    if (r2 != 0) {
5252        o->in2 = load_reg(r2);
5253    }
5254}
5255#define SPEC_in2_r2_nz 0
5256
5257static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
5258{
5259    o->in2 = tcg_temp_new_i64();
5260    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
5261}
5262#define SPEC_in2_r2_8s 0
5263
5264static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5265{
5266    o->in2 = tcg_temp_new_i64();
5267    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
5268}
5269#define SPEC_in2_r2_8u 0
5270
5271static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5272{
5273    o->in2 = tcg_temp_new_i64();
5274    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
5275}
5276#define SPEC_in2_r2_16s 0
5277
5278static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5279{
5280    o->in2 = tcg_temp_new_i64();
5281    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
5282}
5283#define SPEC_in2_r2_16u 0
5284
5285static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5286{
5287    o->in2 = load_reg(get_field(f, r3));
5288}
5289#define SPEC_in2_r3 0
5290
5291static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5292{
5293    o->in2 = tcg_temp_new_i64();
5294    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
5295}
5296#define SPEC_in2_r3_sr32 0
5297
5298static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5299{
5300    o->in2 = tcg_temp_new_i64();
5301    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
5302}
5303#define SPEC_in2_r2_32s 0
5304
5305static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5306{
5307    o->in2 = tcg_temp_new_i64();
5308    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
5309}
5310#define SPEC_in2_r2_32u 0
5311
5312static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5313{
5314    o->in2 = tcg_temp_new_i64();
5315    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
5316}
5317#define SPEC_in2_r2_sr32 0
5318
5319static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
5320{
5321    o->in2 = load_freg32_i64(get_field(f, r2));
5322}
5323#define SPEC_in2_e2 0
5324
5325static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5326{
5327    o->in2 = fregs[get_field(f, r2)];
5328    o->g_in2 = true;
5329}
5330#define SPEC_in2_f2_o 0
5331
5332static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5333{
5334    int r2 = get_field(f, r2);
5335    o->in1 = fregs[r2];
5336    o->in2 = fregs[r2 + 2];
5337    o->g_in1 = o->g_in2 = true;
5338}
5339#define SPEC_in2_x2_o SPEC_r2_f128
5340
5341static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
5342{
5343    o->in2 = get_address(s, 0, get_field(f, r2), 0);
5344}
5345#define SPEC_in2_ra2 0
5346
5347static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
5348{
5349    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5350    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5351}
5352#define SPEC_in2_a2 0
5353
5354static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
5355{
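        /* Relative-immediate operands count halfwords from the address of
           the current insn, hence the multiply by 2.  */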
5356    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
5357}
5358#define SPEC_in2_ri2 0
5359
5360static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
5361{
5362    help_l2_shift(s, f, o, 31);
5363}
5364#define SPEC_in2_sh32 0
5365
5366static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
5367{
5368    help_l2_shift(s, f, o, 63);
5369}
5370#define SPEC_in2_sh64 0
5371
5372static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5373{
5374    in2_a2(s, f, o);
5375    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5376}
5377#define SPEC_in2_m2_8u 0
5378
5379static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5380{
5381    in2_a2(s, f, o);
5382    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5383}
5384#define SPEC_in2_m2_16s 0
5385
5386static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5387{
5388    in2_a2(s, f, o);
5389    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5390}
5391#define SPEC_in2_m2_16u 0
5392
5393static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5394{
5395    in2_a2(s, f, o);
5396    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5397}
5398#define SPEC_in2_m2_32s 0
5399
5400static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5401{
5402    in2_a2(s, f, o);
5403    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5404}
5405#define SPEC_in2_m2_32u 0
5406
5407static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5408{
5409    in2_a2(s, f, o);
5410    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5411}
5412#define SPEC_in2_m2_64 0
5413
5414static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5415{
5416    in2_ri2(s, f, o);
5417    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5418}
5419#define SPEC_in2_mri2_16u 0
5420
5421static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5422{
5423    in2_ri2(s, f, o);
5424    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5425}
5426#define SPEC_in2_mri2_32s 0
5427
5428static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5429{
5430    in2_ri2(s, f, o);
5431    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5432}
5433#define SPEC_in2_mri2_32u 0
5434
5435static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5436{
5437    in2_ri2(s, f, o);
5438    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5439}
5440#define SPEC_in2_mri2_64 0
5441
5442static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
5443{
5444    o->in2 = tcg_const_i64(get_field(f, i2));
5445}
5446#define SPEC_in2_i2 0
5447
5448static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5449{
5450    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
5451}
5452#define SPEC_in2_i2_8u 0
5453
5454static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5455{
5456    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
5457}
5458#define SPEC_in2_i2_16u 0
5459
5460static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5461{
5462    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
5463}
5464#define SPEC_in2_i2_32u 0
5465
5466static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5467{
5468    uint64_t i2 = (uint16_t)get_field(f, i2);
5469    o->in2 = tcg_const_i64(i2 << s->insn->data);
5470}
5471#define SPEC_in2_i2_16u_shl 0
5472
5473static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5474{
5475    uint64_t i2 = (uint32_t)get_field(f, i2);
5476    o->in2 = tcg_const_i64(i2 << s->insn->data);
5477}
5478#define SPEC_in2_i2_32u_shl 0
5479
5480#ifndef CONFIG_USER_ONLY
5481static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
5482{
5483    o->in2 = tcg_const_i64(s->fields->raw_insn);
5484}
5485#define SPEC_in2_insn 0
5486#endif
5487
5488/* ====================================================================== */
5489
5490/* Find opc within the table of insns.  This is formulated as a switch
5491   statement so that (1) we get compile-time notice of cut-paste errors
5492   for duplicated opcodes, and (2) the compiler generates the binary
5493   search tree, rather than us having to post-process the table.  */
5494
5495#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5496    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5497
5498#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5499
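    /* insn-data.def is expanded three times: here, to build an enum of
       insn_* indices; below, to populate the insn_info[] array; and once
       more inside lookup_opc(), to generate the opcode switch.  */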
5500enum DisasInsnEnum {
5501#include "insn-data.def"
5502};
5503
5504#undef D
5505#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
5506    .opc = OPC,                                                             \
5507    .fmt = FMT_##FT,                                                        \
5508    .fac = FAC_##FC,                                                        \
5509    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
5510    .name = #NM,                                                            \
5511    .help_in1 = in1_##I1,                                                   \
5512    .help_in2 = in2_##I2,                                                   \
5513    .help_prep = prep_##P,                                                  \
5514    .help_wout = wout_##W,                                                  \
5515    .help_cout = cout_##CC,                                                 \
5516    .help_op = op_##OP,                                                     \
5517    .data = D                                                               \
5518 },
5519
5520/* Allow 0 to be used for NULL in the table below.  */
5521#define in1_0  NULL
5522#define in2_0  NULL
5523#define prep_0  NULL
5524#define wout_0  NULL
5525#define cout_0  NULL
5526#define op_0  NULL
5527
5528#define SPEC_in1_0 0
5529#define SPEC_in2_0 0
5530#define SPEC_prep_0 0
5531#define SPEC_wout_0 0
5532
5533/* Give smaller names to the various facilities.  */
5534#define FAC_Z           S390_FEAT_ZARCH
5535#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5536#define FAC_DFP         S390_FEAT_DFP
5537#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5538#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
5539#define FAC_EE          S390_FEAT_EXECUTE_EXT
5540#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
5541#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
5542#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5543#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5544#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5545#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
5546#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
5547#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5548#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5549#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
5550#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
5551#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
5552#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
5553#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
5554#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
5555#define FAC_SFLE        S390_FEAT_STFLE
5556#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5557#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5558#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5559#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
5560#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
5561#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
5562#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
5563#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5564#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
5565#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
5566#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
5567#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
5568#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
5569
5570static const DisasInsn insn_info[] = {
5571#include "insn-data.def"
5572};
5573
5574#undef D
5575#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5576    case OPC: return &insn_info[insn_ ## NM];
5577
5578static const DisasInsn *lookup_opc(uint16_t opc)
5579{
5580    switch (opc) {
5581#include "insn-data.def"
5582    default:
5583        return NULL;
5584    }
5585}
5586
5587#undef D
5588#undef C
5589
5590/* Extract a field from the insn.  The INSN should be left-aligned in
5591   the uint64_t so that we can more easily utilize the big-bit-endian
5592   definitions we extract from the Principles of Operation.  */
5593
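/* For example, a 4-bit field that the Principles of Operation describes as
   occupying instruction bits 8-11 is extracted below as
   (insn << 8) >> (64 - 4); because of the left-alignment the architectural
   bit numbers can be used directly, whatever the instruction length.  */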
5594static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5595{
5596    uint32_t r, m;
5597
5598    if (f->size == 0) {
5599        return;
5600    }
5601
5602    /* Zero extract the field from the insn.  */
5603    r = (insn << f->beg) >> (64 - f->size);
5604
5605    /* Sign-extend, or un-swap the field as necessary.  */
5606    switch (f->type) {
5607    case 0: /* unsigned */
5608        break;
5609    case 1: /* signed */
5610        assert(f->size <= 32);
5611        m = 1u << (f->size - 1);
5612        r = (r ^ m) - m;
5613        break;
5614    case 2: /* dl+dh split, signed 20 bit. */
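        /* The raw field holds the encoded DL (upper 12 bits) followed by
           DH (lower 8 bits); rebuild the architectural DH:DL value, with
           the top bit of DH providing the sign.  Illustrative example: a
           raw value of 0xABCFF (DL=0xABC, DH=0xFF) becomes 0xFFFFFABC,
           i.e. the 20-bit displacement -1348.  */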
5615        r = ((int8_t)r << 12) | (r >> 8);
5616        break;
5617    default:
5618        abort();
5619    }
5620
5621    /* Validate the "compressed" encoding we selected above, i.e. check
5622       that we haven't made two different original fields overlap.  */
5623    assert(((o->presentC >> f->indexC) & 1) == 0);
5624    o->presentC |= 1 << f->indexC;
5625    o->presentO |= 1 << f->indexO;
5626
5627    o->c[f->indexC] = r;
5628}
5629
5630/* Look up the insn at the current PC, extracting the operands into O and
5631   returning the info struct for the insn.  Returns NULL for invalid insn.  */
5632
5633static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
5634                                     DisasFields *f)
5635{
5636    uint64_t insn, pc = s->pc;
5637    int op, op2, ilen;
5638    const DisasInsn *info;
5639
5640    if (unlikely(s->ex_value)) {
5641        /* Drop the EX data now, so that it's clear on exception paths.  */
5642        TCGv_i64 zero = tcg_const_i64(0);
5643        tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
5644        tcg_temp_free_i64(zero);
5645
5646        /* Extract the values saved by EXECUTE.  */
5647        insn = s->ex_value & 0xffffffffffff0000ull;
5648        ilen = s->ex_value & 0xf;
5649        op = insn >> 56;
5650    } else {
5651        insn = ld_code2(env, pc);
5652        op = (insn >> 8) & 0xff;
5653        ilen = get_ilen(op);
5654        switch (ilen) {
5655        case 2:
5656            insn = insn << 48;
5657            break;
5658        case 4:
5659            insn = ld_code4(env, pc) << 32;
5660            break;
5661        case 6:
5662            insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
5663            break;
5664        default:
5665            g_assert_not_reached();
5666        }
5667    }
5668    s->next_pc = s->pc + ilen;
5669    s->ilen = ilen;
5670
5671    /* We can't actually determine the insn format until we've looked up
5672       the full insn opcode, which we can't do without locating the
5673       secondary opcode.  Assume by default that OP2 is at bit 40; for
5674       those smaller insns that don't actually have a secondary opcode
5675       this will correctly result in OP2 = 0. */
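    /* For instance, in the default case below the secondary opcode of a
       left-aligned 6-byte insn (e.g. the 0xe3 RXY group) is its last byte,
       i.e. bits 40-47, hence (insn << 40) >> 56.  */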
5676    switch (op) {
5677    case 0x01: /* E */
5678    case 0x80: /* S */
5679    case 0x82: /* S */
5680    case 0x93: /* S */
5681    case 0xb2: /* S, RRF, RRE, IE */
5682    case 0xb3: /* RRE, RRD, RRF */
5683    case 0xb9: /* RRE, RRF */
5684    case 0xe5: /* SSE, SIL */
5685        op2 = (insn << 8) >> 56;
5686        break;
5687    case 0xa5: /* RI */
5688    case 0xa7: /* RI */
5689    case 0xc0: /* RIL */
5690    case 0xc2: /* RIL */
5691    case 0xc4: /* RIL */
5692    case 0xc6: /* RIL */
5693    case 0xc8: /* SSF */
5694    case 0xcc: /* RIL */
5695        op2 = (insn << 12) >> 60;
5696        break;
5697    case 0xc5: /* MII */
5698    case 0xc7: /* SMI */
5699    case 0xd0 ... 0xdf: /* SS */
5700    case 0xe1: /* SS */
5701    case 0xe2: /* SS */
5702    case 0xe8: /* SS */
5703    case 0xe9: /* SS */
5704    case 0xea: /* SS */
5705    case 0xee ... 0xf3: /* SS */
5706    case 0xf8 ... 0xfd: /* SS */
5707        op2 = 0;
5708        break;
5709    default:
5710        op2 = (insn << 40) >> 56;
5711        break;
5712    }
5713
5714    memset(f, 0, sizeof(*f));
5715    f->raw_insn = insn;
5716    f->op = op;
5717    f->op2 = op2;
5718
5719    /* Look up the instruction.  */
5720    info = lookup_opc(op << 8 | op2);
5721
5722    /* If we found it, extract the operands.  */
5723    if (info != NULL) {
5724        DisasFormat fmt = info->fmt;
5725        int i;
5726
5727        for (i = 0; i < NUM_C_FIELD; ++i) {
5728            extract_field(f, &format_info[fmt].op[i], insn);
5729        }
5730    }
5731    return info;
5732}
5733
5734static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
5735{
5736    const DisasInsn *insn;
5737    ExitStatus ret = NO_EXIT;
5738    DisasFields f;
5739    DisasOps o;
5740
5741    /* Search for the insn in the table.  */
5742    insn = extract_insn(env, s, &f);
5743
5744    /* Not found means unimplemented/illegal opcode.  */
5745    if (insn == NULL) {
5746        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
5747                      f.op, f.op2);
5748        gen_illegal_opcode(s);
5749        return EXIT_NORETURN;
5750    }
5751
5752#ifndef CONFIG_USER_ONLY
5753    if (s->tb->flags & FLAG_MASK_PER) {
5754        TCGv_i64 addr = tcg_const_i64(s->pc);
5755        gen_helper_per_ifetch(cpu_env, addr);
5756        tcg_temp_free_i64(addr);
5757    }
5758#endif
5759
5760    /* Check for insn specification exceptions.  */
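    /* Each SPEC_* bit collected from the table encodes a static operand
       constraint, e.g. that a register number be even (register pairs) or
       low enough to name a 128-bit FP register pair; a violation raises a
       specification exception before any code for the operation itself is
       generated.  */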
5761    if (insn->spec) {
5762        int spec = insn->spec, excp = 0, r;
5763
5764        if (spec & SPEC_r1_even) {
5765            r = get_field(&f, r1);
5766            if (r & 1) {
5767                excp = PGM_SPECIFICATION;
5768            }
5769        }
5770        if (spec & SPEC_r2_even) {
5771            r = get_field(&f, r2);
5772            if (r & 1) {
5773                excp = PGM_SPECIFICATION;
5774            }
5775        }
5776        if (spec & SPEC_r3_even) {
5777            r = get_field(&f, r3);
5778            if (r & 1) {
5779                excp = PGM_SPECIFICATION;
5780            }
5781        }
5782        if (spec & SPEC_r1_f128) {
5783            r = get_field(&f, r1);
5784            if (r > 13) {
5785                excp = PGM_SPECIFICATION;
5786            }
5787        }
5788        if (spec & SPEC_r2_f128) {
5789            r = get_field(&f, r2);
5790            if (r > 13) {
5791                excp = PGM_SPECIFICATION;
5792            }
5793        }
5794        if (excp) {
5795            gen_program_exception(s, excp);
5796            return EXIT_NORETURN;
5797        }
5798    }
5799
5800    /* Set up the structures we use to communicate with the helpers. */
5801    s->insn = insn;
5802    s->fields = &f;
5803    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
5804    TCGV_UNUSED_I64(o.out);
5805    TCGV_UNUSED_I64(o.out2);
5806    TCGV_UNUSED_I64(o.in1);
5807    TCGV_UNUSED_I64(o.in2);
5808    TCGV_UNUSED_I64(o.addr1);
5809
5810    /* Implement the instruction.  */
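    /* The table-driven phases run in a fixed order: load in1 and in2,
       prepare the output (prep), perform the operation (op), write the
       result back (wout) and finally set the condition code (cout); any
       phase the insn does not need is simply NULL.  */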
5811    if (insn->help_in1) {
5812        insn->help_in1(s, &f, &o);
5813    }
5814    if (insn->help_in2) {
5815        insn->help_in2(s, &f, &o);
5816    }
5817    if (insn->help_prep) {
5818        insn->help_prep(s, &f, &o);
5819    }
5820    if (insn->help_op) {
5821        ret = insn->help_op(s, &o);
5822    }
5823    if (insn->help_wout) {
5824        insn->help_wout(s, &f, &o);
5825    }
5826    if (insn->help_cout) {
5827        insn->help_cout(s, &o);
5828    }
5829
5830    /* Free any temporaries created by the helpers.  */
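    /* Operands flagged g_* alias TCG globals (e.g. one of regs[]) rather
       than temporaries, so they must not be freed here.  */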
5831    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
5832        tcg_temp_free_i64(o.out);
5833    }
5834    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
5835        tcg_temp_free_i64(o.out2);
5836    }
5837    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
5838        tcg_temp_free_i64(o.in1);
5839    }
5840    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
5841        tcg_temp_free_i64(o.in2);
5842    }
5843    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
5844        tcg_temp_free_i64(o.addr1);
5845    }
5846
5847#ifndef CONFIG_USER_ONLY
5848    if (s->tb->flags & FLAG_MASK_PER) {
5849        /* An exception might be triggered; save the PSW if not already done.  */
5850        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
5851            tcg_gen_movi_i64(psw_addr, s->next_pc);
5852        }
5853
5854        /* Save off cc.  */
5855        update_cc_op(s);
5856
5857        /* Call the helper to check for a possible PER exception.  */
5858        gen_helper_per_check_exception(cpu_env);
5859    }
5860#endif
5861
5862    /* Advance to the next instruction.  */
5863    s->pc = s->next_pc;
5864    return ret;
5865}
5866
5867void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
5868{
5869    CPUS390XState *env = cs->env_ptr;
5870    DisasContext dc;
5871    target_ulong pc_start;
5872    uint64_t next_page_start;
5873    int num_insns, max_insns;
5874    ExitStatus status;
5875    bool do_debug;
5876
5877    pc_start = tb->pc;
5878
5879    /* 31-bit mode */
5880    if (!(tb->flags & FLAG_MASK_64)) {
5881        pc_start &= 0x7fffffff;
5882    }
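    /* In 31-bit mode only the low 31 bits form the instruction address,
       hence the mask above.  */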
5883
5884    dc.tb = tb;
5885    dc.pc = pc_start;
5886    dc.cc_op = CC_OP_DYNAMIC;
5887    dc.ex_value = tb->cs_base;
5888    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
5889
5890    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
5891
5892    num_insns = 0;
5893    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
5894    if (max_insns == 0) {
5895        max_insns = CF_COUNT_MASK;
5896    }
5897    if (max_insns > TCG_MAX_INSNS) {
5898        max_insns = TCG_MAX_INSNS;
5899    }
5900
5901    gen_tb_start(tb);
5902
5903    do {
5904        tcg_gen_insn_start(dc.pc, dc.cc_op);
5905        num_insns++;
5906
5907        if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
5908            status = EXIT_PC_STALE;
5909            do_debug = true;
5910            /* The address covered by the breakpoint must be included in
5911               [tb->pc, tb->pc + tb->size) in order for it to be
5912               properly cleared -- thus we increment the PC here so that
5913               the logic setting tb->size below does the right thing.  */
5914            dc.pc += 2;
5915            break;
5916        }
5917
5918        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
5919            gen_io_start();
5920        }
5921
5922        status = translate_one(env, &dc);
5923
5924        /* If we reach a page boundary, are single-stepping,
5925           or exhaust the instruction count, stop generation.  */
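        /* A TB that translates the target of an EXECUTE (dc.ex_value set)
           is likewise limited to that single instruction.  */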
5926        if (status == NO_EXIT
5927            && (dc.pc >= next_page_start
5928                || tcg_op_buf_full()
5929                || num_insns >= max_insns
5930                || singlestep
5931                || cs->singlestep_enabled
5932                || dc.ex_value)) {
5933            status = EXIT_PC_STALE;
5934        }
5935    } while (status == NO_EXIT);
5936
5937    if (tb_cflags(tb) & CF_LAST_IO) {
5938        gen_io_end();
5939    }
5940
5941    switch (status) {
5942    case EXIT_GOTO_TB:
5943    case EXIT_NORETURN:
5944        break;
5945    case EXIT_PC_STALE:
5946    case EXIT_PC_STALE_NOCHAIN:
5947        update_psw_addr(&dc);
5948        /* FALLTHRU */
5949    case EXIT_PC_UPDATED:
5950        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5951           cc op type is in env.  */
5952        update_cc_op(&dc);
5953        /* FALLTHRU */
5954    case EXIT_PC_CC_UPDATED:
5955        /* Exit the TB, either by raising a debug exception or by return.  */
5956        if (do_debug) {
5957            gen_exception(EXCP_DEBUG);
5958        } else if (use_exit_tb(&dc) || status == EXIT_PC_STALE_NOCHAIN) {
5959            tcg_gen_exit_tb(0);
5960        } else {
5961            tcg_gen_lookup_and_goto_ptr();
5962        }
5963        break;
5964    default:
5965        g_assert_not_reached();
5966    }
5967
5968    gen_tb_end(tb, num_insns);
5969
5970    tb->size = dc.pc - pc_start;
5971    tb->icount = num_insns;
5972
5973#if defined(S390X_DEBUG_DISAS)
5974    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5975        && qemu_log_in_addr_range(pc_start)) {
5976        qemu_log_lock();
5977        if (unlikely(dc.ex_value)) {
5978            /* ??? Unfortunately log_target_disas can't use host memory.  */
5979            qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
5980        } else {
5981            qemu_log("IN: %s\n", lookup_symbol(pc_start));
5982            log_target_disas(cs, pc_start, dc.pc - pc_start);
5983            qemu_log("\n");
5984        }
5985        qemu_log_unlock();
5986    }
5987#endif
5988}
5989
5990void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
5991                          target_ulong *data)
5992{
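    /* data[] holds the values recorded by tcg_gen_insn_start(dc.pc,
       dc.cc_op) during translation: data[0] is the PSW address and
       data[1] the cc_op that was live at that point.  */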
5993    int cc_op = data[1];
5994    env->psw.addr = data[0];
5995    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5996        env->cc_op = cc_op;
5997    }
5998}
5999