qemu/target-s390x/translate.c
   1/*
   2 *  S/390 translation
   3 *
   4 *  Copyright (c) 2009 Ulrich Hecht
   5 *  Copyright (c) 2010 Alexander Graf
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * Lesser General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21/* #define DEBUG_INLINE_BRANCHES */
  22#define S390X_DEBUG_DISAS
  23/* #define S390X_DEBUG_DISAS_VERBOSE */
  24
  25#ifdef S390X_DEBUG_DISAS_VERBOSE
  26#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
  27#else
  28#  define LOG_DISAS(...) do { } while (0)
  29#endif
  30
  31#include "qemu/osdep.h"
  32#include "cpu.h"
  33#include "disas/disas.h"
  34#include "exec/exec-all.h"
  35#include "tcg-op.h"
  36#include "qemu/log.h"
  37#include "qemu/host-utils.h"
  38#include "exec/cpu_ldst.h"
  39
  40/* global register indexes */
  41static TCGv_env cpu_env;
  42
  43#include "exec/gen-icount.h"
  44#include "exec/helper-proto.h"
  45#include "exec/helper-gen.h"
  46
  47#include "trace-tcg.h"
  48#include "exec/log.h"
  49
  50
  51/* Information that (almost) every instruction needs to manipulate.  */
  52typedef struct DisasContext DisasContext;
  53typedef struct DisasInsn DisasInsn;
  54typedef struct DisasFields DisasFields;
  55
  56struct DisasContext {
  57    struct TranslationBlock *tb;
  58    const DisasInsn *insn;
  59    DisasFields *fields;
  60    uint64_t pc, next_pc;
  61    enum cc_op cc_op;
  62    bool singlestep_enabled;
  63};
  64
  65/* Information carried about a condition to be evaluated.  */
  66typedef struct {
  67    TCGCond cond:8;
  68    bool is_64;
  69    bool g1;
  70    bool g2;
  71    union {
  72        struct { TCGv_i64 a, b; } s64;
  73        struct { TCGv_i32 a, b; } s32;
  74    } u;
  75} DisasCompare;
  76
  77#define DISAS_EXCP 4
  78
  79#ifdef DEBUG_INLINE_BRANCHES
  80static uint64_t inline_branch_hit[CC_OP_MAX];
  81static uint64_t inline_branch_miss[CC_OP_MAX];
  82#endif
  83
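/* Form the link information saved by branch-and-link: the full PC in
   64-bit mode, the PC with the 31-bit addressing-mode bit (0x80000000)
   or'ed in for 31-bit mode, and the unmodified PC in 24-bit mode.  */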
  84static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
  85{
  86    if (!(s->tb->flags & FLAG_MASK_64)) {
  87        if (s->tb->flags & FLAG_MASK_32) {
  88            return pc | 0x80000000;
  89        }
  90    }
  91    return pc;
  92}
  93
  94void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
  95                         int flags)
  96{
  97    S390CPU *cpu = S390_CPU(cs);
  98    CPUS390XState *env = &cpu->env;
  99    int i;
 100
 101    if (env->cc_op > 3) {
 102        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
 103                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
 104    } else {
 105        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
 106                    env->psw.mask, env->psw.addr, env->cc_op);
 107    }
 108
 109    for (i = 0; i < 16; i++) {
 110        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
 111        if ((i % 4) == 3) {
 112            cpu_fprintf(f, "\n");
 113        } else {
 114            cpu_fprintf(f, " ");
 115        }
 116    }
 117
 118    for (i = 0; i < 16; i++) {
 119        cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
 120        if ((i % 4) == 3) {
 121            cpu_fprintf(f, "\n");
 122        } else {
 123            cpu_fprintf(f, " ");
 124        }
 125    }
 126
 127    for (i = 0; i < 32; i++) {
 128        cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
 129                    env->vregs[i][0].ll, env->vregs[i][1].ll);
 130        cpu_fprintf(f, (i % 2) ? "\n" : " ");
 131    }
 132
 133#ifndef CONFIG_USER_ONLY
 134    for (i = 0; i < 16; i++) {
 135        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
 136        if ((i % 4) == 3) {
 137            cpu_fprintf(f, "\n");
 138        } else {
 139            cpu_fprintf(f, " ");
 140        }
 141    }
 142#endif
 143
 144#ifdef DEBUG_INLINE_BRANCHES
 145    for (i = 0; i < CC_OP_MAX; i++) {
 146        cpu_fprintf(f, "  %15s = %10ld\t%10ld\n", cc_name(i),
 147                    inline_branch_miss[i], inline_branch_hit[i]);
 148    }
 149#endif
 150
 151    cpu_fprintf(f, "\n");
 152}
 153
 154static TCGv_i64 psw_addr;
 155static TCGv_i64 psw_mask;
 156static TCGv_i64 gbea;
 157
 158static TCGv_i32 cc_op;
 159static TCGv_i64 cc_src;
 160static TCGv_i64 cc_dst;
 161static TCGv_i64 cc_vr;
 162
 163static char cpu_reg_names[32][4];
 164static TCGv_i64 regs[16];
 165static TCGv_i64 fregs[16];
 166
 167void s390x_translate_init(void)
 168{
 169    int i;
 170
 171    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
 172    tcg_ctx.tcg_env = cpu_env;
 173    psw_addr = tcg_global_mem_new_i64(cpu_env,
 174                                      offsetof(CPUS390XState, psw.addr),
 175                                      "psw_addr");
 176    psw_mask = tcg_global_mem_new_i64(cpu_env,
 177                                      offsetof(CPUS390XState, psw.mask),
 178                                      "psw_mask");
 179    gbea = tcg_global_mem_new_i64(cpu_env,
 180                                  offsetof(CPUS390XState, gbea),
 181                                  "gbea");
 182
 183    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
 184                                   "cc_op");
 185    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
 186                                    "cc_src");
 187    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
 188                                    "cc_dst");
 189    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
 190                                   "cc_vr");
 191
 192    for (i = 0; i < 16; i++) {
 193        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
 194        regs[i] = tcg_global_mem_new(cpu_env,
 195                                     offsetof(CPUS390XState, regs[i]),
 196                                     cpu_reg_names[i]);
 197    }
 198
 199    for (i = 0; i < 16; i++) {
 200        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
 201        fregs[i] = tcg_global_mem_new(cpu_env,
 202                                      offsetof(CPUS390XState, vregs[i][0].d),
 203                                      cpu_reg_names[i + 16]);
 204    }
 205}
 206
 207static TCGv_i64 load_reg(int reg)
 208{
 209    TCGv_i64 r = tcg_temp_new_i64();
 210    tcg_gen_mov_i64(r, regs[reg]);
 211    return r;
 212}
 213
 214static TCGv_i64 load_freg32_i64(int reg)
 215{
 216    TCGv_i64 r = tcg_temp_new_i64();
 217    tcg_gen_shri_i64(r, fregs[reg], 32);
 218    return r;
 219}
 220
 221static void store_reg(int reg, TCGv_i64 v)
 222{
 223    tcg_gen_mov_i64(regs[reg], v);
 224}
 225
 226static void store_freg(int reg, TCGv_i64 v)
 227{
 228    tcg_gen_mov_i64(fregs[reg], v);
 229}
 230
 231static void store_reg32_i64(int reg, TCGv_i64 v)
 232{
 233    /* 32-bit register writes keep the upper half */
 234    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
 235}
 236
 237static void store_reg32h_i64(int reg, TCGv_i64 v)
 238{
 239    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
 240}
 241
 242static void store_freg32_i64(int reg, TCGv_i64 v)
 243{
 244    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
 245}
 246
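/* Helpers that produce a 128-bit result return the high 64 bits directly
   and leave the low 64 bits in env->retxl; this retrieves the low half.  */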
 247static void return_low128(TCGv_i64 dest)
 248{
 249    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
 250}
 251
 252static void update_psw_addr(DisasContext *s)
 253{
 254    /* psw.addr */
 255    tcg_gen_movi_i64(psw_addr, s->pc);
 256}
 257
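/* PER (program-event recording) support.  gbea shadows the architected
   breaking-event-address register, which holds the address of the last
   instruction that broke sequential execution; when the PER flag is set
   in the TB flags, taken branches are also reported to the per_branch
   helper so that a branch event can be raised.  */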
 258static void per_branch(DisasContext *s, bool to_next)
 259{
 260#ifndef CONFIG_USER_ONLY
 261    tcg_gen_movi_i64(gbea, s->pc);
 262
 263    if (s->tb->flags & FLAG_MASK_PER) {
 264        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
 265        gen_helper_per_branch(cpu_env, gbea, next_pc);
 266        if (to_next) {
 267            tcg_temp_free_i64(next_pc);
 268        }
 269    }
 270#endif
 271}
 272
 273static void per_branch_cond(DisasContext *s, TCGCond cond,
 274                            TCGv_i64 arg1, TCGv_i64 arg2)
 275{
 276#ifndef CONFIG_USER_ONLY
 277    if (s->tb->flags & FLAG_MASK_PER) {
 278        TCGLabel *lab = gen_new_label();
 279        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
 280
 281        tcg_gen_movi_i64(gbea, s->pc);
 282        gen_helper_per_branch(cpu_env, gbea, psw_addr);
 283
 284        gen_set_label(lab);
 285    } else {
 286        TCGv_i64 pc = tcg_const_i64(s->pc);
 287        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
 288        tcg_temp_free_i64(pc);
 289    }
 290#endif
 291}
 292
 293static void per_breaking_event(DisasContext *s)
 294{
 295    tcg_gen_movi_i64(gbea, s->pc);
 296}
 297
 298static void update_cc_op(DisasContext *s)
 299{
 300    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
 301        tcg_gen_movi_i32(cc_op, s->cc_op);
 302    }
 303}
 304
 305static void potential_page_fault(DisasContext *s)
 306{
 307    update_psw_addr(s);
 308    update_cc_op(s);
 309}
 310
 311static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
 312{
 313    return (uint64_t)cpu_lduw_code(env, pc);
 314}
 315
 316static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
 317{
 318    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
 319}
 320
 321static int get_mem_index(DisasContext *s)
 322{
 323    switch (s->tb->flags & FLAG_MASK_ASC) {
 324    case PSW_ASC_PRIMARY >> 32:
 325        return 0;
 326    case PSW_ASC_SECONDARY >> 32:
 327        return 1;
 328    case PSW_ASC_HOME >> 32:
 329        return 2;
 330    default:
 331        tcg_abort();
 332        break;
 333    }
 334}
 335
 336static void gen_exception(int excp)
 337{
 338    TCGv_i32 tmp = tcg_const_i32(excp);
 339    gen_helper_exception(cpu_env, tmp);
 340    tcg_temp_free_i32(tmp);
 341}
 342
 343static void gen_program_exception(DisasContext *s, int code)
 344{
 345    TCGv_i32 tmp;
 346
 347    /* Remember what pgm exception this was.  */
 348    tmp = tcg_const_i32(code);
 349    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
 350    tcg_temp_free_i32(tmp);
 351
 352    tmp = tcg_const_i32(s->next_pc - s->pc);
 353    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
 354    tcg_temp_free_i32(tmp);
 355
 356    /* Advance past instruction.  */
 357    s->pc = s->next_pc;
 358    update_psw_addr(s);
 359
 360    /* Save off cc.  */
 361    update_cc_op(s);
 362
 363    /* Trigger exception.  */
 364    gen_exception(EXCP_PGM);
 365}
 366
 367static inline void gen_illegal_opcode(DisasContext *s)
 368{
 369    gen_program_exception(s, PGM_OPERATION);
 370}
 371
 372static inline void gen_trap(DisasContext *s)
 373{
 374    TCGv_i32 t;
 375
 376    /* Set DXC to 0xff.  */
 377    t = tcg_temp_new_i32();
 378    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
 379    tcg_gen_ori_i32(t, t, 0xff00);
 380    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
 381    tcg_temp_free_i32(t);
 382
 383    gen_program_exception(s, PGM_DATA);
 384}
 385
 386#ifndef CONFIG_USER_ONLY
 387static void check_privileged(DisasContext *s)
 388{
 389    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
 390        gen_program_exception(s, PGM_PRIVILEGED);
 391    }
 392}
 393#endif
 394
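/* Build the effective address b2 + x2 + d2 of a storage operand; a base or
   index register number of 0 contributes nothing.  When the PSW is not in
   64-bit mode, the result is wrapped to 31 bits.  */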
 395static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
 396{
 397    TCGv_i64 tmp = tcg_temp_new_i64();
 398    bool need_31 = !(s->tb->flags & FLAG_MASK_64);
 399
 400    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
 401       displacements early we create larger immediate addends.  */
 402
 403    /* Note that addi optimizes the imm==0 case.  */
 404    if (b2 && x2) {
 405        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
 406        tcg_gen_addi_i64(tmp, tmp, d2);
 407    } else if (b2) {
 408        tcg_gen_addi_i64(tmp, regs[b2], d2);
 409    } else if (x2) {
 410        tcg_gen_addi_i64(tmp, regs[x2], d2);
 411    } else {
 412        if (need_31) {
 413            d2 &= 0x7fffffff;
 414            need_31 = false;
 415        }
 416        tcg_gen_movi_i64(tmp, d2);
 417    }
 418    if (need_31) {
 419        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
 420    }
 421
 422    return tmp;
 423}
 424
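/* True when cc_src/cc_dst/cc_vr may hold inputs still needed to compute
   the CC, i.e. s->cc_op names a computed operation rather than one of the
   constant CC values (CC_OP_CONST0..3, the low enum values), CC_OP_STATIC
   or CC_OP_DYNAMIC.  */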
 425static inline bool live_cc_data(DisasContext *s)
 426{
 427    return (s->cc_op != CC_OP_DYNAMIC
 428            && s->cc_op != CC_OP_STATIC
 429            && s->cc_op > 3);
 430}
 431
 432static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
 433{
 434    if (live_cc_data(s)) {
 435        tcg_gen_discard_i64(cc_src);
 436        tcg_gen_discard_i64(cc_dst);
 437        tcg_gen_discard_i64(cc_vr);
 438    }
 439    s->cc_op = CC_OP_CONST0 + val;
 440}
 441
 442static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
 443{
 444    if (live_cc_data(s)) {
 445        tcg_gen_discard_i64(cc_src);
 446        tcg_gen_discard_i64(cc_vr);
 447    }
 448    tcg_gen_mov_i64(cc_dst, dst);
 449    s->cc_op = op;
 450}
 451
 452static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
 453                                  TCGv_i64 dst)
 454{
 455    if (live_cc_data(s)) {
 456        tcg_gen_discard_i64(cc_vr);
 457    }
 458    tcg_gen_mov_i64(cc_src, src);
 459    tcg_gen_mov_i64(cc_dst, dst);
 460    s->cc_op = op;
 461}
 462
 463static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
 464                                  TCGv_i64 dst, TCGv_i64 vr)
 465{
 466    tcg_gen_mov_i64(cc_src, src);
 467    tcg_gen_mov_i64(cc_dst, dst);
 468    tcg_gen_mov_i64(cc_vr, vr);
 469    s->cc_op = op;
 470}
 471
 472static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
 473{
 474    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
 475}
 476
 477static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
 478{
 479    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
 480}
 481
 482static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
 483{
 484    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
 485}
 486
 487static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
 488{
 489    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
 490}
 491
 492/* CC value is in env->cc_op */
 493static void set_cc_static(DisasContext *s)
 494{
 495    if (live_cc_data(s)) {
 496        tcg_gen_discard_i64(cc_src);
 497        tcg_gen_discard_i64(cc_dst);
 498        tcg_gen_discard_i64(cc_vr);
 499    }
 500    s->cc_op = CC_OP_STATIC;
 501}
 502
 503/* calculates cc into cc_op */
 504static void gen_op_calc_cc(DisasContext *s)
 505{
 506    TCGv_i32 local_cc_op;
 507    TCGv_i64 dummy;
 508
 509    TCGV_UNUSED_I32(local_cc_op);
 510    TCGV_UNUSED_I64(dummy);
 511    switch (s->cc_op) {
 512    default:
 513        dummy = tcg_const_i64(0);
 514        /* FALLTHRU */
 515    case CC_OP_ADD_64:
 516    case CC_OP_ADDU_64:
 517    case CC_OP_ADDC_64:
 518    case CC_OP_SUB_64:
 519    case CC_OP_SUBU_64:
 520    case CC_OP_SUBB_64:
 521    case CC_OP_ADD_32:
 522    case CC_OP_ADDU_32:
 523    case CC_OP_ADDC_32:
 524    case CC_OP_SUB_32:
 525    case CC_OP_SUBU_32:
 526    case CC_OP_SUBB_32:
 527        local_cc_op = tcg_const_i32(s->cc_op);
 528        break;
 529    case CC_OP_CONST0:
 530    case CC_OP_CONST1:
 531    case CC_OP_CONST2:
 532    case CC_OP_CONST3:
 533    case CC_OP_STATIC:
 534    case CC_OP_DYNAMIC:
 535        break;
 536    }
 537
 538    switch (s->cc_op) {
 539    case CC_OP_CONST0:
 540    case CC_OP_CONST1:
 541    case CC_OP_CONST2:
 542    case CC_OP_CONST3:
 543        /* s->cc_op is the cc value */
 544        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
 545        break;
 546    case CC_OP_STATIC:
 547        /* env->cc_op already is the cc value */
 548        break;
 549    case CC_OP_NZ:
 550    case CC_OP_ABS_64:
 551    case CC_OP_NABS_64:
 552    case CC_OP_ABS_32:
 553    case CC_OP_NABS_32:
 554    case CC_OP_LTGT0_32:
 555    case CC_OP_LTGT0_64:
 556    case CC_OP_COMP_32:
 557    case CC_OP_COMP_64:
 558    case CC_OP_NZ_F32:
 559    case CC_OP_NZ_F64:
 560    case CC_OP_FLOGR:
 561        /* 1 argument */
 562        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
 563        break;
 564    case CC_OP_ICM:
 565    case CC_OP_LTGT_32:
 566    case CC_OP_LTGT_64:
 567    case CC_OP_LTUGTU_32:
 568    case CC_OP_LTUGTU_64:
 569    case CC_OP_TM_32:
 570    case CC_OP_TM_64:
 571    case CC_OP_SLA_32:
 572    case CC_OP_SLA_64:
 573    case CC_OP_NZ_F128:
 574        /* 2 arguments */
 575        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
 576        break;
 577    case CC_OP_ADD_64:
 578    case CC_OP_ADDU_64:
 579    case CC_OP_ADDC_64:
 580    case CC_OP_SUB_64:
 581    case CC_OP_SUBU_64:
 582    case CC_OP_SUBB_64:
 583    case CC_OP_ADD_32:
 584    case CC_OP_ADDU_32:
 585    case CC_OP_ADDC_32:
 586    case CC_OP_SUB_32:
 587    case CC_OP_SUBU_32:
 588    case CC_OP_SUBB_32:
 589        /* 3 arguments */
 590        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
 591        break;
 592    case CC_OP_DYNAMIC:
 593        /* unknown operation - assume 3 arguments and cc_op in env */
 594        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
 595        break;
 596    default:
 597        tcg_abort();
 598    }
 599
 600    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
 601        tcg_temp_free_i32(local_cc_op);
 602    }
 603    if (!TCGV_IS_UNUSED_I64(dummy)) {
 604        tcg_temp_free_i64(dummy);
 605    }
 606
 607    /* We now have cc in cc_op as constant */
 608    set_cc_static(s);
 609}
 610
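/* Decide whether a direct goto_tb exit can be used for DEST.  It is never
   used while single-stepping, for TBs flagged CF_LAST_IO, or when PER is
   active.  In system mode it is additionally limited to destinations on
   the same guest page as the TB start or the current PC, the usual
   condition for keeping chained TBs valid across changes to the guest
   page mapping.  */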
 611static int use_goto_tb(DisasContext *s, uint64_t dest)
 612{
 613    if (unlikely(s->singlestep_enabled) ||
 614        (s->tb->cflags & CF_LAST_IO) ||
 615        (s->tb->flags & FLAG_MASK_PER)) {
 616        return false;
 617    }
 618#ifndef CONFIG_USER_ONLY
 619    return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
 620           (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
 621#else
 622    return true;
 623#endif
 624}
 625
 626static void account_noninline_branch(DisasContext *s, int cc_op)
 627{
 628#ifdef DEBUG_INLINE_BRANCHES
 629    inline_branch_miss[cc_op]++;
 630#endif
 631}
 632
 633static void account_inline_branch(DisasContext *s, int cc_op)
 634{
 635#ifdef DEBUG_INLINE_BRANCHES
 636    inline_branch_hit[cc_op]++;
 637#endif
 638}
 639
 640/* Table of mask values to comparison codes, given a comparison as input.
 641   For such an input, CC=3 should not be possible.  */
 642static const TCGCond ltgt_cond[16] = {
 643    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
 644    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
 645    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
 646    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
 647    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
 648    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
 649    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
 650    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
 651};
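/* Example: mask bit 8 selects CC0 (equal), 4 selects CC1 (low), 2 selects
   CC2 (high) and 1 selects CC3; thus mask 8|4 == 12 means "equal or low"
   and maps to TCG_COND_LE in the table above.  */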
 652
 653/* Table of mask values to comparison codes, given a logic op as input.
 654   For such an input, only CC=0 and CC=1 should be possible.  */
 655static const TCGCond nz_cond[16] = {
 656    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
 657    TCG_COND_NEVER, TCG_COND_NEVER,
 658    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
 659    TCG_COND_NE, TCG_COND_NE,
 660    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
 661    TCG_COND_EQ, TCG_COND_EQ,
 662    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
 663    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
 664};
 665
 666/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
 667   details required to generate a TCG comparison.  */
 668static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
 669{
 670    TCGCond cond;
 671    enum cc_op old_cc_op = s->cc_op;
 672
 673    if (mask == 15 || mask == 0) {
 674        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
 675        c->u.s32.a = cc_op;
 676        c->u.s32.b = cc_op;
 677        c->g1 = c->g2 = true;
 678        c->is_64 = false;
 679        return;
 680    }
 681
 682    /* Find the TCG condition for the mask + cc op.  */
 683    switch (old_cc_op) {
 684    case CC_OP_LTGT0_32:
 685    case CC_OP_LTGT0_64:
 686    case CC_OP_LTGT_32:
 687    case CC_OP_LTGT_64:
 688        cond = ltgt_cond[mask];
 689        if (cond == TCG_COND_NEVER) {
 690            goto do_dynamic;
 691        }
 692        account_inline_branch(s, old_cc_op);
 693        break;
 694
 695    case CC_OP_LTUGTU_32:
 696    case CC_OP_LTUGTU_64:
 697        cond = tcg_unsigned_cond(ltgt_cond[mask]);
 698        if (cond == TCG_COND_NEVER) {
 699            goto do_dynamic;
 700        }
 701        account_inline_branch(s, old_cc_op);
 702        break;
 703
 704    case CC_OP_NZ:
 705        cond = nz_cond[mask];
 706        if (cond == TCG_COND_NEVER) {
 707            goto do_dynamic;
 708        }
 709        account_inline_branch(s, old_cc_op);
 710        break;
 711
 712    case CC_OP_TM_32:
 713    case CC_OP_TM_64:
 714        switch (mask) {
 715        case 8:
 716            cond = TCG_COND_EQ;
 717            break;
 718        case 4 | 2 | 1:
 719            cond = TCG_COND_NE;
 720            break;
 721        default:
 722            goto do_dynamic;
 723        }
 724        account_inline_branch(s, old_cc_op);
 725        break;
 726
 727    case CC_OP_ICM:
 728        switch (mask) {
 729        case 8:
 730            cond = TCG_COND_EQ;
 731            break;
 732        case 4 | 2 | 1:
 733        case 4 | 2:
 734            cond = TCG_COND_NE;
 735            break;
 736        default:
 737            goto do_dynamic;
 738        }
 739        account_inline_branch(s, old_cc_op);
 740        break;
 741
 742    case CC_OP_FLOGR:
 743        switch (mask & 0xa) {
 744        case 8: /* src == 0 -> no one bit found */
 745            cond = TCG_COND_EQ;
 746            break;
 747        case 2: /* src != 0 -> one bit found */
 748            cond = TCG_COND_NE;
 749            break;
 750        default:
 751            goto do_dynamic;
 752        }
 753        account_inline_branch(s, old_cc_op);
 754        break;
 755
 756    case CC_OP_ADDU_32:
 757    case CC_OP_ADDU_64:
 758        switch (mask) {
 759        case 8 | 2: /* vr == 0 */
 760            cond = TCG_COND_EQ;
 761            break;
 762        case 4 | 1: /* vr != 0 */
 763            cond = TCG_COND_NE;
 764            break;
 765        case 8 | 4: /* no carry -> vr >= src */
 766            cond = TCG_COND_GEU;
 767            break;
 768        case 2 | 1: /* carry -> vr < src */
 769            cond = TCG_COND_LTU;
 770            break;
 771        default:
 772            goto do_dynamic;
 773        }
 774        account_inline_branch(s, old_cc_op);
 775        break;
 776
 777    case CC_OP_SUBU_32:
 778    case CC_OP_SUBU_64:
 779        /* Note that CC=0 is impossible; treat it as don't-care.  */
 780        switch (mask & 7) {
 781        case 2: /* zero -> op1 == op2 */
 782            cond = TCG_COND_EQ;
 783            break;
 784        case 4 | 1: /* !zero -> op1 != op2 */
 785            cond = TCG_COND_NE;
 786            break;
 787        case 4: /* borrow (!carry) -> op1 < op2 */
 788            cond = TCG_COND_LTU;
 789            break;
 790        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
 791            cond = TCG_COND_GEU;
 792            break;
 793        default:
 794            goto do_dynamic;
 795        }
 796        account_inline_branch(s, old_cc_op);
 797        break;
 798
 799    default:
 800    do_dynamic:
 801        /* Calculate cc value.  */
 802        gen_op_calc_cc(s);
 803        /* FALLTHRU */
 804
 805    case CC_OP_STATIC:
 806        /* Jump based on CC.  We'll load up the real cond below;
 807           the assignment here merely avoids a compiler warning.  */
 808        account_noninline_branch(s, old_cc_op);
 809        old_cc_op = CC_OP_STATIC;
 810        cond = TCG_COND_NEVER;
 811        break;
 812    }
 813
 814    /* Load up the arguments of the comparison.  */
 815    c->is_64 = true;
 816    c->g1 = c->g2 = false;
 817    switch (old_cc_op) {
 818    case CC_OP_LTGT0_32:
 819        c->is_64 = false;
 820        c->u.s32.a = tcg_temp_new_i32();
 821        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
 822        c->u.s32.b = tcg_const_i32(0);
 823        break;
 824    case CC_OP_LTGT_32:
 825    case CC_OP_LTUGTU_32:
 826    case CC_OP_SUBU_32:
 827        c->is_64 = false;
 828        c->u.s32.a = tcg_temp_new_i32();
 829        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
 830        c->u.s32.b = tcg_temp_new_i32();
 831        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
 832        break;
 833
 834    case CC_OP_LTGT0_64:
 835    case CC_OP_NZ:
 836    case CC_OP_FLOGR:
 837        c->u.s64.a = cc_dst;
 838        c->u.s64.b = tcg_const_i64(0);
 839        c->g1 = true;
 840        break;
 841    case CC_OP_LTGT_64:
 842    case CC_OP_LTUGTU_64:
 843    case CC_OP_SUBU_64:
 844        c->u.s64.a = cc_src;
 845        c->u.s64.b = cc_dst;
 846        c->g1 = c->g2 = true;
 847        break;
 848
 849    case CC_OP_TM_32:
 850    case CC_OP_TM_64:
 851    case CC_OP_ICM:
 852        c->u.s64.a = tcg_temp_new_i64();
 853        c->u.s64.b = tcg_const_i64(0);
 854        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
 855        break;
 856
 857    case CC_OP_ADDU_32:
 858        c->is_64 = false;
 859        c->u.s32.a = tcg_temp_new_i32();
 860        c->u.s32.b = tcg_temp_new_i32();
 861        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
 862        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
 863            tcg_gen_movi_i32(c->u.s32.b, 0);
 864        } else {
 865            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
 866        }
 867        break;
 868
 869    case CC_OP_ADDU_64:
 870        c->u.s64.a = cc_vr;
 871        c->g1 = true;
 872        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
 873            c->u.s64.b = tcg_const_i64(0);
 874        } else {
 875            c->u.s64.b = cc_src;
 876            c->g2 = true;
 877        }
 878        break;
 879
 880    case CC_OP_STATIC:
 881        c->is_64 = false;
 882        c->u.s32.a = cc_op;
 883        c->g1 = true;
 884        switch (mask) {
 885        case 0x8 | 0x4 | 0x2: /* cc != 3 */
 886            cond = TCG_COND_NE;
 887            c->u.s32.b = tcg_const_i32(3);
 888            break;
 889        case 0x8 | 0x4 | 0x1: /* cc != 2 */
 890            cond = TCG_COND_NE;
 891            c->u.s32.b = tcg_const_i32(2);
 892            break;
 893        case 0x8 | 0x2 | 0x1: /* cc != 1 */
 894            cond = TCG_COND_NE;
 895            c->u.s32.b = tcg_const_i32(1);
 896            break;
 897        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
 898            cond = TCG_COND_EQ;
 899            c->g1 = false;
 900            c->u.s32.a = tcg_temp_new_i32();
 901            c->u.s32.b = tcg_const_i32(0);
 902            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
 903            break;
 904        case 0x8 | 0x4: /* cc < 2 */
 905            cond = TCG_COND_LTU;
 906            c->u.s32.b = tcg_const_i32(2);
 907            break;
 908        case 0x8: /* cc == 0 */
 909            cond = TCG_COND_EQ;
 910            c->u.s32.b = tcg_const_i32(0);
 911            break;
 912        case 0x4 | 0x2 | 0x1: /* cc != 0 */
 913            cond = TCG_COND_NE;
 914            c->u.s32.b = tcg_const_i32(0);
 915            break;
 916        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
 917            cond = TCG_COND_NE;
 918            c->g1 = false;
 919            c->u.s32.a = tcg_temp_new_i32();
 920            c->u.s32.b = tcg_const_i32(0);
 921            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
 922            break;
 923        case 0x4: /* cc == 1 */
 924            cond = TCG_COND_EQ;
 925            c->u.s32.b = tcg_const_i32(1);
 926            break;
 927        case 0x2 | 0x1: /* cc > 1 */
 928            cond = TCG_COND_GTU;
 929            c->u.s32.b = tcg_const_i32(1);
 930            break;
 931        case 0x2: /* cc == 2 */
 932            cond = TCG_COND_EQ;
 933            c->u.s32.b = tcg_const_i32(2);
 934            break;
 935        case 0x1: /* cc == 3 */
 936            cond = TCG_COND_EQ;
 937            c->u.s32.b = tcg_const_i32(3);
 938            break;
 939        default:
 940            /* CC is masked by something else: (8 >> cc) & mask.  */
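            /* E.g. for cc == 2, 8 >> cc == 2, so the result is nonzero
               exactly when the CC2 bit (value 2) is set in the mask.  */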
 941            cond = TCG_COND_NE;
 942            c->g1 = false;
 943            c->u.s32.a = tcg_const_i32(8);
 944            c->u.s32.b = tcg_const_i32(0);
 945            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
 946            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
 947            break;
 948        }
 949        break;
 950
 951    default:
 952        abort();
 953    }
 954    c->cond = cond;
 955}
 956
 957static void free_compare(DisasCompare *c)
 958{
 959    if (!c->g1) {
 960        if (c->is_64) {
 961            tcg_temp_free_i64(c->u.s64.a);
 962        } else {
 963            tcg_temp_free_i32(c->u.s32.a);
 964        }
 965    }
 966    if (!c->g2) {
 967        if (c->is_64) {
 968            tcg_temp_free_i64(c->u.s64.b);
 969        } else {
 970            tcg_temp_free_i32(c->u.s32.b);
 971        }
 972    }
 973}
 974
 975/* ====================================================================== */
 976/* Define the insn format enumeration.  */
 977#define F0(N)                         FMT_##N,
 978#define F1(N, X1)                     F0(N)
 979#define F2(N, X1, X2)                 F0(N)
 980#define F3(N, X1, X2, X3)             F0(N)
 981#define F4(N, X1, X2, X3, X4)         F0(N)
 982#define F5(N, X1, X2, X3, X4, X5)     F0(N)
 983
 984typedef enum {
 985#include "insn-format.def"
 986} DisasFormat;
 987
 988#undef F0
 989#undef F1
 990#undef F2
 991#undef F3
 992#undef F4
 993#undef F5
 994
 995/* Define a structure to hold the decoded fields.  We'll store each inside
 996   an array indexed by an enum.  In order to conserve memory, we'll arrange
 997   for fields that do not exist at the same time to overlap, thus the "C"
 998   for compact.  For checking purposes there is an "O" for original index
 999   as well that will be applied to availability bitmaps.  */
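/* For example, r1, m1, b1 and i1 never occur in the same instruction
   format, so they share compact slot 0 below (FLD_C_r1 == FLD_C_m1 ==
   FLD_C_b1 == FLD_C_i1 == 0), while each keeps its own bit in the
   original-index presence bitmap (presentO).  */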
1000
1001enum DisasFieldIndexO {
1002    FLD_O_r1,
1003    FLD_O_r2,
1004    FLD_O_r3,
1005    FLD_O_m1,
1006    FLD_O_m3,
1007    FLD_O_m4,
1008    FLD_O_b1,
1009    FLD_O_b2,
1010    FLD_O_b4,
1011    FLD_O_d1,
1012    FLD_O_d2,
1013    FLD_O_d4,
1014    FLD_O_x2,
1015    FLD_O_l1,
1016    FLD_O_l2,
1017    FLD_O_i1,
1018    FLD_O_i2,
1019    FLD_O_i3,
1020    FLD_O_i4,
1021    FLD_O_i5
1022};
1023
1024enum DisasFieldIndexC {
1025    FLD_C_r1 = 0,
1026    FLD_C_m1 = 0,
1027    FLD_C_b1 = 0,
1028    FLD_C_i1 = 0,
1029
1030    FLD_C_r2 = 1,
1031    FLD_C_b2 = 1,
1032    FLD_C_i2 = 1,
1033
1034    FLD_C_r3 = 2,
1035    FLD_C_m3 = 2,
1036    FLD_C_i3 = 2,
1037
1038    FLD_C_m4 = 3,
1039    FLD_C_b4 = 3,
1040    FLD_C_i4 = 3,
1041    FLD_C_l1 = 3,
1042
1043    FLD_C_i5 = 4,
1044    FLD_C_d1 = 4,
1045
1046    FLD_C_d2 = 5,
1047
1048    FLD_C_d4 = 6,
1049    FLD_C_x2 = 6,
1050    FLD_C_l2 = 6,
1051
1052    NUM_C_FIELD = 7
1053};
1054
1055struct DisasFields {
1056    uint64_t raw_insn;
1057    unsigned op:8;
1058    unsigned op2:8;
1059    unsigned presentC:16;
1060    unsigned int presentO;
1061    int c[NUM_C_FIELD];
1062};
1063
1064/* This is the way fields are to be accessed out of DisasFields.  */
1065#define have_field(S, F)  have_field1((S), FLD_O_##F)
1066#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
1067
1068static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1069{
1070    return (f->presentO >> c) & 1;
1071}
1072
1073static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1074                      enum DisasFieldIndexC c)
1075{
1076    assert(have_field1(f, o));
1077    return f->c[c];
1078}
1079
1080/* Describe the layout of each field in each format.  */
1081typedef struct DisasField {
1082    unsigned int beg:8;
1083    unsigned int size:8;
1084    unsigned int type:2;
1085    unsigned int indexC:6;
1086    enum DisasFieldIndexO indexO:8;
1087} DisasField;
1088
1089typedef struct DisasFormatInfo {
1090    DisasField op[NUM_C_FIELD];
1091} DisasFormatInfo;
1092
1093#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
1094#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
1095#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1096                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1097#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1098                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
1099                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1100#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1101                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1102#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1103                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
1104                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1105#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
1106#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
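/* Example: BXD(2) describes the classic base+index+displacement operand,
   with x2 in bits 12-15, b2 in bits 16-19 and a 12-bit d2 in bits 20-31;
   BXDL(2) is the long-displacement form with a 20-bit signed d2.  */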
1107
1108#define F0(N)                     { { } },
1109#define F1(N, X1)                 { { X1 } },
1110#define F2(N, X1, X2)             { { X1, X2 } },
1111#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
1112#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
1113#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1114
1115static const DisasFormatInfo format_info[] = {
1116#include "insn-format.def"
1117};
1118
1119#undef F0
1120#undef F1
1121#undef F2
1122#undef F3
1123#undef F4
1124#undef F5
1125#undef R
1126#undef M
1127#undef BD
1128#undef BXD
1129#undef BDL
1130#undef BXDL
1131#undef I
1132#undef L
1133
1134/* Generally, we'll extract operands into this structure, operate upon
1135   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
1136   of routines below for more details.  */
1137typedef struct {
1138    bool g_out, g_out2, g_in1, g_in2;
1139    TCGv_i64 out, out2, in1, in2;
1140    TCGv_i64 addr1;
1141} DisasOps;
1142
1143/* Instructions can place constraints on their operands, raising specification
1144   exceptions if they are violated.  To make this easy to automate, each "in1",
1145   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1146   of the following, or 0.  To make this easy to document, we'll put the
1147   SPEC_<name> defines next to <name>.  */
1148
1149#define SPEC_r1_even    1
1150#define SPEC_r2_even    2
1151#define SPEC_r3_even    4
1152#define SPEC_r1_f128    8
1153#define SPEC_r2_f128    16
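/* For example, SPEC_r1_even demands that r1 name the even register of an
   even/odd pair, and SPEC_r1_f128 demands an r1 that has a valid 128-bit
   floating-point register pairing; violating a constraint raises a
   specification exception instead of performing the operation.  */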
1154
1155/* Return values from translate_one, indicating the state of the TB.  */
1156typedef enum {
1157    /* Continue the TB.  */
1158    NO_EXIT,
1159    /* We have emitted one or more goto_tb.  No fixup required.  */
1160    EXIT_GOTO_TB,
1161    /* We are not using a goto_tb (for whatever reason), but have updated
1162       the PC, so there's no need to do it again on
1163       exiting the TB.  */
1164    EXIT_PC_UPDATED,
1165    /* We are exiting the TB, but have neither emitted a goto_tb, nor
1166       updated the PC for the next instruction to be executed.  */
1167    EXIT_PC_STALE,
1168    /* We are ending the TB with a noreturn function call, e.g. longjmp.
1169       No following code will be executed.  */
1170    EXIT_NORETURN,
1171} ExitStatus;
1172
1173typedef enum DisasFacility {
1174    FAC_Z,                  /* zarch (default) */
1175    FAC_CASS,               /* compare and swap and store */
1176    FAC_CASS2,              /* compare and swap and store 2 */
1177    FAC_DFP,                /* decimal floating point */
1178    FAC_DFPR,               /* decimal floating point rounding */
1179    FAC_DO,                 /* distinct operands */
1180    FAC_EE,                 /* execute extensions */
1181    FAC_EI,                 /* extended immediate */
1182    FAC_FPE,                /* floating point extension */
1183    FAC_FPSSH,              /* floating point support sign handling */
1184    FAC_FPRGR,              /* FPR-GR transfer */
1185    FAC_GIE,                /* general instructions extension */
1186    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
1187    FAC_HW,                 /* high-word */
1188    FAC_IEEEE_SIM,          /* IEEE exception simulation */
1189    FAC_MIE,                /* miscellaneous-instruction-extensions */
1190    FAC_LAT,                /* load-and-trap */
1191    FAC_LOC,                /* load/store on condition */
1192    FAC_LD,                 /* long displacement */
1193    FAC_PC,                 /* population count */
1194    FAC_SCF,                /* store clock fast */
1195    FAC_SFLE,               /* store facility list extended */
1196    FAC_ILA,                /* interlocked access facility 1 */
1197} DisasFacility;
1198
1199struct DisasInsn {
1200    unsigned opc:16;
1201    DisasFormat fmt:8;
1202    DisasFacility fac:8;
1203    unsigned spec:8;
1204
1205    const char *name;
1206
1207    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1208    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1209    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1210    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1211    void (*help_cout)(DisasContext *, DisasOps *);
1212    ExitStatus (*help_op)(DisasContext *, DisasOps *);
1213
1214    uint64_t data;
1215};
1216
1217/* ====================================================================== */
1218/* Miscellaneous helpers, used by several operations.  */
1219
1220static void help_l2_shift(DisasContext *s, DisasFields *f,
1221                          DisasOps *o, int mask)
1222{
1223    int b2 = get_field(f, b2);
1224    int d2 = get_field(f, d2);
1225
1226    if (b2 == 0) {
1227        o->in2 = tcg_const_i64(d2 & mask);
1228    } else {
1229        o->in2 = get_address(s, 0, b2, d2);
1230        tcg_gen_andi_i64(o->in2, o->in2, mask);
1231    }
1232}
1233
1234static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1235{
1236    if (dest == s->next_pc) {
1237        per_branch(s, true);
1238        return NO_EXIT;
1239    }
1240    if (use_goto_tb(s, dest)) {
1241        update_cc_op(s);
1242        per_breaking_event(s);
1243        tcg_gen_goto_tb(0);
1244        tcg_gen_movi_i64(psw_addr, dest);
1245        tcg_gen_exit_tb((uintptr_t)s->tb);
1246        return EXIT_GOTO_TB;
1247    } else {
1248        tcg_gen_movi_i64(psw_addr, dest);
1249        per_branch(s, false);
1250        return EXIT_PC_UPDATED;
1251    }
1252}
1253
1254static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1255                              bool is_imm, int imm, TCGv_i64 cdest)
1256{
1257    ExitStatus ret;
1258    uint64_t dest = s->pc + 2 * imm;
1259    TCGLabel *lab;
1260
1261    /* Take care of the special cases first.  */
1262    if (c->cond == TCG_COND_NEVER) {
1263        ret = NO_EXIT;
1264        goto egress;
1265    }
1266    if (is_imm) {
1267        if (dest == s->next_pc) {
1268            /* Branch to next.  */
1269            per_branch(s, true);
1270            ret = NO_EXIT;
1271            goto egress;
1272        }
1273        if (c->cond == TCG_COND_ALWAYS) {
1274            ret = help_goto_direct(s, dest);
1275            goto egress;
1276        }
1277    } else {
1278        if (TCGV_IS_UNUSED_I64(cdest)) {
1279            /* E.g. bcr %r0 -> no branch.  */
1280            ret = NO_EXIT;
1281            goto egress;
1282        }
1283        if (c->cond == TCG_COND_ALWAYS) {
1284            tcg_gen_mov_i64(psw_addr, cdest);
1285            per_branch(s, false);
1286            ret = EXIT_PC_UPDATED;
1287            goto egress;
1288        }
1289    }
1290
1291    if (use_goto_tb(s, s->next_pc)) {
1292        if (is_imm && use_goto_tb(s, dest)) {
1293            /* Both exits can use goto_tb.  */
1294            update_cc_op(s);
1295
1296            lab = gen_new_label();
1297            if (c->is_64) {
1298                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1299            } else {
1300                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1301            }
1302
1303            /* Branch not taken.  */
1304            tcg_gen_goto_tb(0);
1305            tcg_gen_movi_i64(psw_addr, s->next_pc);
1306            tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1307
1308            /* Branch taken.  */
1309            gen_set_label(lab);
1310            per_breaking_event(s);
1311            tcg_gen_goto_tb(1);
1312            tcg_gen_movi_i64(psw_addr, dest);
1313            tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1314
1315            ret = EXIT_GOTO_TB;
1316        } else {
1317            /* Fallthru can use goto_tb, but taken branch cannot.  */
1318            /* Store taken branch destination before the brcond.  This
1319               avoids having to allocate a new local temp to hold it.
1320               We'll overwrite this in the not taken case anyway.  */
1321            if (!is_imm) {
1322                tcg_gen_mov_i64(psw_addr, cdest);
1323            }
1324
1325            lab = gen_new_label();
1326            if (c->is_64) {
1327                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1328            } else {
1329                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1330            }
1331
1332            /* Branch not taken.  */
1333            update_cc_op(s);
1334            tcg_gen_goto_tb(0);
1335            tcg_gen_movi_i64(psw_addr, s->next_pc);
1336            tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1337
1338            gen_set_label(lab);
1339            if (is_imm) {
1340                tcg_gen_movi_i64(psw_addr, dest);
1341            }
1342            per_breaking_event(s);
1343            ret = EXIT_PC_UPDATED;
1344        }
1345    } else {
1346        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
1347           Most commonly we're single-stepping or some other condition that
1348           disables all use of goto_tb.  Just update the PC and exit.  */
1349
1350        TCGv_i64 next = tcg_const_i64(s->next_pc);
1351        if (is_imm) {
1352            cdest = tcg_const_i64(dest);
1353        }
1354
1355        if (c->is_64) {
1356            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1357                                cdest, next);
1358            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1359        } else {
1360            TCGv_i32 t0 = tcg_temp_new_i32();
1361            TCGv_i64 t1 = tcg_temp_new_i64();
1362            TCGv_i64 z = tcg_const_i64(0);
1363            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1364            tcg_gen_extu_i32_i64(t1, t0);
1365            tcg_temp_free_i32(t0);
1366            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1367            per_branch_cond(s, TCG_COND_NE, t1, z);
1368            tcg_temp_free_i64(t1);
1369            tcg_temp_free_i64(z);
1370        }
1371
1372        if (is_imm) {
1373            tcg_temp_free_i64(cdest);
1374        }
1375        tcg_temp_free_i64(next);
1376
1377        ret = EXIT_PC_UPDATED;
1378    }
1379
1380 egress:
1381    free_compare(c);
1382    return ret;
1383}
1384
1385/* ====================================================================== */
1386/* The operations.  These perform the bulk of the work for any insn,
1387   usually after the operands have been loaded and output initialized.  */
1388
1389static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1390{
1391    TCGv_i64 z, n;
1392    z = tcg_const_i64(0);
1393    n = tcg_temp_new_i64();
1394    tcg_gen_neg_i64(n, o->in2);
1395    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1396    tcg_temp_free_i64(n);
1397    tcg_temp_free_i64(z);
1398    return NO_EXIT;
1399}
1400
1401static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1402{
1403    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1404    return NO_EXIT;
1405}
1406
1407static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1408{
1409    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1410    return NO_EXIT;
1411}
1412
1413static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1414{
1415    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1416    tcg_gen_mov_i64(o->out2, o->in2);
1417    return NO_EXIT;
1418}
1419
1420static ExitStatus op_add(DisasContext *s, DisasOps *o)
1421{
1422    tcg_gen_add_i64(o->out, o->in1, o->in2);
1423    return NO_EXIT;
1424}
1425
1426static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1427{
1428    DisasCompare cmp;
1429    TCGv_i64 carry;
1430
1431    tcg_gen_add_i64(o->out, o->in1, o->in2);
1432
1433    /* The carry flag is the msb of CC, therefore the branch mask that would
1434       create that comparison is 3.  Feeding the generated comparison to
1435       setcond produces the carry flag that we desire.  */
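    /* Concretely: ADD LOGICAL sets CC 2 or 3 exactly when a carry occurs,
       and mask 3 selects the CC2 and CC3 cases, so the comparison built by
       disas_jcc below is true precisely when there was a carry.  */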
1436    disas_jcc(s, &cmp, 3);
1437    carry = tcg_temp_new_i64();
1438    if (cmp.is_64) {
1439        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1440    } else {
1441        TCGv_i32 t = tcg_temp_new_i32();
1442        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1443        tcg_gen_extu_i32_i64(carry, t);
1444        tcg_temp_free_i32(t);
1445    }
1446    free_compare(&cmp);
1447
1448    tcg_gen_add_i64(o->out, o->out, carry);
1449    tcg_temp_free_i64(carry);
1450    return NO_EXIT;
1451}
1452
1453static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1454{
1455    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1456    return NO_EXIT;
1457}
1458
1459static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1460{
1461    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1462    return NO_EXIT;
1463}
1464
1465static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1466{
1467    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1468    return_low128(o->out2);
1469    return NO_EXIT;
1470}
1471
1472static ExitStatus op_and(DisasContext *s, DisasOps *o)
1473{
1474    tcg_gen_and_i64(o->out, o->in1, o->in2);
1475    return NO_EXIT;
1476}
1477
1478static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1479{
1480    int shift = s->insn->data & 0xff;
1481    int size = s->insn->data >> 8;
1482    uint64_t mask = ((1ull << size) - 1) << shift;
1483
1484    assert(!o->g_in2);
1485    tcg_gen_shli_i64(o->in2, o->in2, shift);
1486    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1487    tcg_gen_and_i64(o->out, o->in1, o->in2);
1488
1489    /* Produce the CC from only the bits manipulated.  */
1490    tcg_gen_andi_i64(cc_dst, o->out, mask);
1491    set_cc_nz_u64(s, cc_dst);
1492    return NO_EXIT;
1493}
1494
1495static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1496{
1497    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1498    if (!TCGV_IS_UNUSED_I64(o->in2)) {
1499        tcg_gen_mov_i64(psw_addr, o->in2);
1500        per_branch(s, false);
1501        return EXIT_PC_UPDATED;
1502    } else {
1503        return NO_EXIT;
1504    }
1505}
1506
1507static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1508{
1509    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1510    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1511}
1512
1513static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1514{
1515    int m1 = get_field(s->fields, m1);
1516    bool is_imm = have_field(s->fields, i2);
1517    int imm = is_imm ? get_field(s->fields, i2) : 0;
1518    DisasCompare c;
1519
1520    disas_jcc(s, &c, m1);
1521    return help_branch(s, &c, is_imm, imm, o->in2);
1522}
1523
1524static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1525{
1526    int r1 = get_field(s->fields, r1);
1527    bool is_imm = have_field(s->fields, i2);
1528    int imm = is_imm ? get_field(s->fields, i2) : 0;
1529    DisasCompare c;
1530    TCGv_i64 t;
1531
1532    c.cond = TCG_COND_NE;
1533    c.is_64 = false;
1534    c.g1 = false;
1535    c.g2 = false;
1536
1537    t = tcg_temp_new_i64();
1538    tcg_gen_subi_i64(t, regs[r1], 1);
1539    store_reg32_i64(r1, t);
1540    c.u.s32.a = tcg_temp_new_i32();
1541    c.u.s32.b = tcg_const_i32(0);
1542    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1543    tcg_temp_free_i64(t);
1544
1545    return help_branch(s, &c, is_imm, imm, o->in2);
1546}
1547
1548static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
1549{
1550    int r1 = get_field(s->fields, r1);
1551    int imm = get_field(s->fields, i2);
1552    DisasCompare c;
1553    TCGv_i64 t;
1554
1555    c.cond = TCG_COND_NE;
1556    c.is_64 = false;
1557    c.g1 = false;
1558    c.g2 = false;
1559
1560    t = tcg_temp_new_i64();
1561    tcg_gen_shri_i64(t, regs[r1], 32);
1562    tcg_gen_subi_i64(t, t, 1);
1563    store_reg32h_i64(r1, t);
1564    c.u.s32.a = tcg_temp_new_i32();
1565    c.u.s32.b = tcg_const_i32(0);
1566    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1567    tcg_temp_free_i64(t);
1568
1569    return help_branch(s, &c, 1, imm, o->in2);
1570}
1571
1572static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1573{
1574    int r1 = get_field(s->fields, r1);
1575    bool is_imm = have_field(s->fields, i2);
1576    int imm = is_imm ? get_field(s->fields, i2) : 0;
1577    DisasCompare c;
1578
1579    c.cond = TCG_COND_NE;
1580    c.is_64 = true;
1581    c.g1 = true;
1582    c.g2 = false;
1583
1584    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1585    c.u.s64.a = regs[r1];
1586    c.u.s64.b = tcg_const_i64(0);
1587
1588    return help_branch(s, &c, is_imm, imm, o->in2);
1589}
1590
1591static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1592{
1593    int r1 = get_field(s->fields, r1);
1594    int r3 = get_field(s->fields, r3);
1595    bool is_imm = have_field(s->fields, i2);
1596    int imm = is_imm ? get_field(s->fields, i2) : 0;
1597    DisasCompare c;
1598    TCGv_i64 t;
1599
1600    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1601    c.is_64 = false;
1602    c.g1 = false;
1603    c.g2 = false;
1604
1605    t = tcg_temp_new_i64();
1606    tcg_gen_add_i64(t, regs[r1], regs[r3]);
1607    c.u.s32.a = tcg_temp_new_i32();
1608    c.u.s32.b = tcg_temp_new_i32();
1609    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1610    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1611    store_reg32_i64(r1, t);
1612    tcg_temp_free_i64(t);
1613
1614    return help_branch(s, &c, is_imm, imm, o->in2);
1615}
1616
1617static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1618{
1619    int r1 = get_field(s->fields, r1);
1620    int r3 = get_field(s->fields, r3);
1621    bool is_imm = have_field(s->fields, i2);
1622    int imm = is_imm ? get_field(s->fields, i2) : 0;
1623    DisasCompare c;
1624
1625    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1626    c.is_64 = true;
1627
1628    if (r1 == (r3 | 1)) {
1629        c.u.s64.b = load_reg(r3 | 1);
1630        c.g2 = false;
1631    } else {
1632        c.u.s64.b = regs[r3 | 1];
1633        c.g2 = true;
1634    }
1635
1636    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1637    c.u.s64.a = regs[r1];
1638    c.g1 = true;
1639
1640    return help_branch(s, &c, is_imm, imm, o->in2);
1641}
1642
1643static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1644{
1645    int imm, m3 = get_field(s->fields, m3);
1646    bool is_imm;
1647    DisasCompare c;
1648
1649    c.cond = ltgt_cond[m3];
1650    if (s->insn->data) {
1651        c.cond = tcg_unsigned_cond(c.cond);
1652    }
1653    c.is_64 = c.g1 = c.g2 = true;
1654    c.u.s64.a = o->in1;
1655    c.u.s64.b = o->in2;
1656
1657    is_imm = have_field(s->fields, i4);
1658    if (is_imm) {
1659        imm = get_field(s->fields, i4);
1660    } else {
1661        imm = 0;
1662        o->out = get_address(s, 0, get_field(s->fields, b4),
1663                             get_field(s->fields, d4));
1664    }
1665
1666    return help_branch(s, &c, is_imm, imm, o->out);
1667}
1668
1669static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1670{
1671    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1672    set_cc_static(s);
1673    return NO_EXIT;
1674}
1675
1676static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1677{
1678    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1679    set_cc_static(s);
1680    return NO_EXIT;
1681}
1682
1683static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1684{
1685    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1686    set_cc_static(s);
1687    return NO_EXIT;
1688}
1689
1690static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1691{
1692    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1693    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1694    tcg_temp_free_i32(m3);
1695    gen_set_cc_nz_f32(s, o->in2);
1696    return NO_EXIT;
1697}
1698
1699static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1700{
1701    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1702    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1703    tcg_temp_free_i32(m3);
1704    gen_set_cc_nz_f64(s, o->in2);
1705    return NO_EXIT;
1706}
1707
1708static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1709{
1710    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1711    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1712    tcg_temp_free_i32(m3);
1713    gen_set_cc_nz_f128(s, o->in1, o->in2);
1714    return NO_EXIT;
1715}
1716
1717static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1718{
1719    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1720    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1721    tcg_temp_free_i32(m3);
1722    gen_set_cc_nz_f32(s, o->in2);
1723    return NO_EXIT;
1724}
1725
1726static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1727{
1728    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1729    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1730    tcg_temp_free_i32(m3);
1731    gen_set_cc_nz_f64(s, o->in2);
1732    return NO_EXIT;
1733}
1734
1735static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1736{
1737    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1738    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1739    tcg_temp_free_i32(m3);
1740    gen_set_cc_nz_f128(s, o->in1, o->in2);
1741    return NO_EXIT;
1742}
1743
1744static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1745{
1746    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1747    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1748    tcg_temp_free_i32(m3);
1749    gen_set_cc_nz_f32(s, o->in2);
1750    return NO_EXIT;
1751}
1752
1753static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1754{
1755    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1756    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1757    tcg_temp_free_i32(m3);
1758    gen_set_cc_nz_f64(s, o->in2);
1759    return NO_EXIT;
1760}
1761
1762static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1763{
1764    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1765    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1766    tcg_temp_free_i32(m3);
1767    gen_set_cc_nz_f128(s, o->in1, o->in2);
1768    return NO_EXIT;
1769}
1770
1771static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1772{
1773    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1774    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1775    tcg_temp_free_i32(m3);
1776    gen_set_cc_nz_f32(s, o->in2);
1777    return NO_EXIT;
1778}
1779
1780static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1781{
1782    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1783    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1784    tcg_temp_free_i32(m3);
1785    gen_set_cc_nz_f64(s, o->in2);
1786    return NO_EXIT;
1787}
1788
1789static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1790{
1791    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1792    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1793    tcg_temp_free_i32(m3);
1794    gen_set_cc_nz_f128(s, o->in1, o->in2);
1795    return NO_EXIT;
1796}
1797
1798static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1799{
1800    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1801    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1802    tcg_temp_free_i32(m3);
1803    return NO_EXIT;
1804}
1805
1806static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1807{
1808    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1809    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1810    tcg_temp_free_i32(m3);
1811    return NO_EXIT;
1812}
1813
1814static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1815{
1816    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1817    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1818    tcg_temp_free_i32(m3);
1819    return_low128(o->out2);
1820    return NO_EXIT;
1821}
1822
1823static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1824{
1825    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1826    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1827    tcg_temp_free_i32(m3);
1828    return NO_EXIT;
1829}
1830
1831static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1832{
1833    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1834    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1835    tcg_temp_free_i32(m3);
1836    return NO_EXIT;
1837}
1838
1839static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1840{
1841    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1842    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1843    tcg_temp_free_i32(m3);
1844    return_low128(o->out2);
1845    return NO_EXIT;
1846}
1847
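/* CHECKSUM: the helper takes the running checksum (IN1) and the R2
   address/length pair, returns the number of bytes it consumed in LEN
   (used below to advance R2 and shrink R2 + 1), and hands the updated
   checksum back through the low-128 return.  */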
1848static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1849{
1850    int r2 = get_field(s->fields, r2);
1851    TCGv_i64 len = tcg_temp_new_i64();
1852
1853    potential_page_fault(s);
1854    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1855    set_cc_static(s);
1856    return_low128(o->out);
1857
1858    tcg_gen_add_i64(regs[r2], regs[r2], len);
1859    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1860    tcg_temp_free_i64(len);
1861
1862    return NO_EXIT;
1863}
1864
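/* COMPARE LOGICAL (character): operand lengths of 1, 2, 4 or 8 bytes
   are compared inline as unsigned loads; any other length falls back
   to the helper.  The L1 field encodes length minus one, hence the
   switch on l + 1.  */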
1865static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1866{
1867    int l = get_field(s->fields, l1);
1868    TCGv_i32 vl;
1869
1870    switch (l + 1) {
1871    case 1:
1872        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1873        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1874        break;
1875    case 2:
1876        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1877        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1878        break;
1879    case 4:
1880        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1881        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1882        break;
1883    case 8:
1884        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1885        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1886        break;
1887    default:
1888        potential_page_fault(s);
1889        vl = tcg_const_i32(l);
1890        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1891        tcg_temp_free_i32(vl);
1892        set_cc_static(s);
1893        return NO_EXIT;
1894    }
1895    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1896    return NO_EXIT;
1897}
1898
1899static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1900{
1901    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1902    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1903    potential_page_fault(s);
1904    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1905    tcg_temp_free_i32(r1);
1906    tcg_temp_free_i32(r3);
1907    set_cc_static(s);
1908    return NO_EXIT;
1909}
1910
1911static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1912{
1913    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1914    TCGv_i32 t1 = tcg_temp_new_i32();
1915    tcg_gen_extrl_i64_i32(t1, o->in1);
1916    potential_page_fault(s);
1917    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1918    set_cc_static(s);
1919    tcg_temp_free_i32(t1);
1920    tcg_temp_free_i32(m3);
1921    return NO_EXIT;
1922}
1923
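/* COMPARE LOGICAL STRING: R0 supplies the terminating character.  The
   helper hands back the two updated operand addresses, one as its
   return value (IN1) and one through the low-128 return (IN2).  */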
1924static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1925{
1926    potential_page_fault(s);
1927    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1928    set_cc_static(s);
1929    return_low128(o->in2);
1930    return NO_EXIT;
1931}
1932
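/* COPY SIGN: the result takes its sign bit from IN1 and its magnitude
   (the remaining 63 bits) from IN2; the two constants below are simply
   the MSB and its complement.  */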
1933static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1934{
1935    TCGv_i64 t = tcg_temp_new_i64();
1936    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1937    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1938    tcg_gen_or_i64(o->out, o->out, t);
1939    tcg_temp_free_i64(t);
1940    return NO_EXIT;
1941}
1942
1943static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1944{
1945    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
1946    int d2 = get_field(s->fields, d2);
1947    int b2 = get_field(s->fields, b2);
1948    int is_64 = s->insn->data;
1949    TCGv_i64 addr, mem, cc, z;
1950
1951    /* Note that in1 = R3 (new value) and
1952       in2 = (zero-extended) R1 (expected value).  */
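    /* In C terms the generated code behaves roughly like
           old = *addr;
           cc = (old != expected);
           *addr = cc ? old : new;      -- the store always happens
           R1 = old;
       which matches CS/CSG apart from atomicity (see the FIXME above).  */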
1953
1954    /* Load the memory into the (temporary) output.  While the PoO only talks
1955       about moving the memory to R1 on inequality, if we include equality it
1956       means that R1 is equal to the memory in all conditions.  */
1957    addr = get_address(s, 0, b2, d2);
1958    if (is_64) {
1959        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
1960    } else {
1961        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
1962    }
1963
1964    /* Are the memory and expected values (un)equal?  Note that this setcond
1965       produces the output CC value, thus the NE sense of the test.  */
1966    cc = tcg_temp_new_i64();
1967    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1968
1969    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1970       Recall that we are allowed to unconditionally issue the store (and
1971       thus any possible write trap), so (re-)store the original contents
1972       of MEM in case of inequality.  */
1973    z = tcg_const_i64(0);
1974    mem = tcg_temp_new_i64();
1975    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
1976    if (is_64) {
1977        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
1978    } else {
1979        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
1980    }
1981    tcg_temp_free_i64(z);
1982    tcg_temp_free_i64(mem);
1983    tcg_temp_free_i64(addr);
1984
1985    /* Store CC back to cc_op.  Wait until after the store so that any
1986       exception gets the old cc_op value.  */
1987    tcg_gen_extrl_i64_i32(cc_op, cc);
1988    tcg_temp_free_i64(cc);
1989    set_cc_static(s);
1990    return NO_EXIT;
1991}
1992
1993static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1994{
1995    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
1996    int r1 = get_field(s->fields, r1);
1997    int r3 = get_field(s->fields, r3);
1998    int d2 = get_field(s->fields, d2);
1999    int b2 = get_field(s->fields, b2);
2000    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
2001
2002    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
2003
2004    addrh = get_address(s, 0, b2, d2);
2005    addrl = get_address(s, 0, b2, d2 + 8);
2006    outh = tcg_temp_new_i64();
2007    outl = tcg_temp_new_i64();
2008
2009    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
2010    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
2011
2012    /* Fold the double-word compare with arithmetic.  */
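    /* (outh ^ R1) | (outl ^ R1+1) is zero exactly when both halves
       match, so a single setcond against zero yields the CC without
       a branch.  */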
2013    cc = tcg_temp_new_i64();
2014    z = tcg_temp_new_i64();
2015    tcg_gen_xor_i64(cc, outh, regs[r1]);
2016    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
2017    tcg_gen_or_i64(cc, cc, z);
2018    tcg_gen_movi_i64(z, 0);
2019    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
2020
2021    memh = tcg_temp_new_i64();
2022    meml = tcg_temp_new_i64();
2023    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
2024    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
2025    tcg_temp_free_i64(z);
2026
2027    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
2028    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
2029    tcg_temp_free_i64(memh);
2030    tcg_temp_free_i64(meml);
2031    tcg_temp_free_i64(addrh);
2032    tcg_temp_free_i64(addrl);
2033
2034    /* Save back state now that we've passed all exceptions.  */
2035    tcg_gen_mov_i64(regs[r1], outh);
2036    tcg_gen_mov_i64(regs[r1 + 1], outl);
2037    tcg_gen_extrl_i64_i32(cc_op, cc);
2038    tcg_temp_free_i64(outh);
2039    tcg_temp_free_i64(outl);
2040    tcg_temp_free_i64(cc);
2041    set_cc_static(s);
2042    return NO_EXIT;
2043}
2044
2045#ifndef CONFIG_USER_ONLY
2046static ExitStatus op_csp(DisasContext *s, DisasOps *o)
2047{
2048    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2049    check_privileged(s);
2050    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
2051    tcg_temp_free_i32(r1);
2052    set_cc_static(s);
2053    return NO_EXIT;
2054}
2055#endif
2056
2057static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2058{
2059    TCGv_i64 t1 = tcg_temp_new_i64();
2060    TCGv_i32 t2 = tcg_temp_new_i32();
2061    tcg_gen_extrl_i64_i32(t2, o->in1);
2062    gen_helper_cvd(t1, t2);
2063    tcg_temp_free_i32(t2);
2064    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2065    tcg_temp_free_i64(t1);
2066    return NO_EXIT;
2067}
2068
2069static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2070{
2071    int m3 = get_field(s->fields, m3);
2072    TCGLabel *lab = gen_new_label();
2073    TCGCond c;
2074
2075    c = tcg_invert_cond(ltgt_cond[m3]);
2076    if (s->insn->data) {
2077        c = tcg_unsigned_cond(c);
2078    }
2079    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2080
2081    /* Trap.  */
2082    gen_trap(s);
2083
2084    gen_set_label(lab);
2085    return NO_EXIT;
2086}
2087
2088#ifndef CONFIG_USER_ONLY
2089static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2090{
2091    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2092    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2093    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2094
2095    check_privileged(s);
2096    update_psw_addr(s);
2097    gen_op_calc_cc(s);
2098
2099    gen_helper_diag(cpu_env, r1, r3, func_code);
2100
2101    tcg_temp_free_i32(func_code);
2102    tcg_temp_free_i32(r3);
2103    tcg_temp_free_i32(r1);
2104    return NO_EXIT;
2105}
2106#endif
2107
2108static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2109{
2110    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2111    return_low128(o->out);
2112    return NO_EXIT;
2113}
2114
2115static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2116{
2117    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2118    return_low128(o->out);
2119    return NO_EXIT;
2120}
2121
2122static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2123{
2124    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2125    return_low128(o->out);
2126    return NO_EXIT;
2127}
2128
2129static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2130{
2131    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2132    return_low128(o->out);
2133    return NO_EXIT;
2134}
2135
2136static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2137{
2138    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2139    return NO_EXIT;
2140}
2141
2142static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2143{
2144    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2145    return NO_EXIT;
2146}
2147
2148static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2149{
2150    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2151    return_low128(o->out2);
2152    return NO_EXIT;
2153}
2154
2155static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2156{
2157    int r2 = get_field(s->fields, r2);
2158    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2159    return NO_EXIT;
2160}
2161
2162static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
2163{
2164    /* No cache information provided.  */
2165    tcg_gen_movi_i64(o->out, -1);
2166    return NO_EXIT;
2167}
2168
2169static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2170{
2171    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2172    return NO_EXIT;
2173}
2174
2175static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
2176{
2177    int r1 = get_field(s->fields, r1);
2178    int r2 = get_field(s->fields, r2);
2179    TCGv_i64 t = tcg_temp_new_i64();
2180
2181    /* Note the "subsequently" in the PoO, which implies a defined result
2182       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2183    tcg_gen_shri_i64(t, psw_mask, 32);
2184    store_reg32_i64(r1, t);
2185    if (r2 != 0) {
2186        store_reg32_i64(r2, psw_mask);
2187    }
2188
2189    tcg_temp_free_i64(t);
2190    return NO_EXIT;
2191}
2192
2193static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2194{
2195    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2196       tb->flags, (ab)use the tb->cs_base field as the address of
2197       the template in memory, and grab 8 bits of tb->flags/cflags for
2198       the contents of the register.  We would then recognize all this
2199       in gen_intermediate_code_internal, generating code for exactly
2200       one instruction.  This new TB then gets executed normally.
2201
2202       On the other hand, this seems to be mostly used for modifying
2203       MVC inside of memcpy, which needs a helper call anyway.  So
2204       perhaps this doesn't bear thinking about any further.  */
2205
2206    TCGv_i64 tmp;
2207
2208    update_psw_addr(s);
2209    gen_op_calc_cc(s);
2210
2211    tmp = tcg_const_i64(s->next_pc);
2212    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2213    tcg_temp_free_i64(tmp);
2214
2215    return NO_EXIT;
2216}
2217
2218static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
2219{
2220    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2221    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2222    tcg_temp_free_i32(m3);
2223    return NO_EXIT;
2224}
2225
2226static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
2227{
2228    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2229    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2230    tcg_temp_free_i32(m3);
2231    return NO_EXIT;
2232}
2233
2234static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
2235{
2236    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2237    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2238    return_low128(o->out2);
2239    tcg_temp_free_i32(m3);
2240    return NO_EXIT;
2241}
2242
2243static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2244{
2245    /* We'll use the original input for cc computation, since we get to
2246       compare that against 0, which ought to be better than comparing
2247       the real output against 64.  It also lets cc_dst be a convenient
2248       temporary during our computation.  */
2249    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2250
2251    /* R1 = IN ? CLZ(IN) : 64.  */
2252    gen_helper_clz(o->out, o->in2);
2253
2254    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2255       value by 64, which is undefined.  But since the shift is 64 iff the
2256       input is zero, we still get the correct result after and'ing.  */
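    /* Example: IN = 0xf0 gives CLZ = 56; the found bit is
       0x8000000000000000 >> 56 = 0x80, so R1+1 = 0xf0 & ~0x80 = 0x70,
       i.e. the input with its leftmost one bit cleared.  */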
2257    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2258    tcg_gen_shr_i64(o->out2, o->out2, o->out);
2259    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2260    return NO_EXIT;
2261}
2262
2263static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2264{
2265    int m3 = get_field(s->fields, m3);
2266    int pos, len, base = s->insn->data;
2267    TCGv_i64 tmp = tcg_temp_new_i64();
2268    uint64_t ccm;
2269
2270    switch (m3) {
2271    case 0xf:
2272        /* Effectively a 32-bit load.  */
2273        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2274        len = 32;
2275        goto one_insert;
2276
2277    case 0xc:
2278    case 0x6:
2279    case 0x3:
2280        /* Effectively a 16-bit load.  */
2281        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2282        len = 16;
2283        goto one_insert;
2284
2285    case 0x8:
2286    case 0x4:
2287    case 0x2:
2288    case 0x1:
2289        /* Effectively an 8-bit load.  */
2290        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2291        len = 8;
2292        goto one_insert;
2293
2294    one_insert:
2295        pos = base + ctz32(m3) * 8;
2296        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2297        ccm = ((1ull << len) - 1) << pos;
2298        break;
2299
2300    default:
2301        /* This is going to be a sequence of loads and inserts.  */
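        /* Walk the mask bits from most to least significant: each set
           bit consumes one byte from memory and is deposited one byte
           lower within the 32-bit field selected by BASE.  */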
2302        pos = base + 32 - 8;
2303        ccm = 0;
2304        while (m3) {
2305            if (m3 & 0x8) {
2306                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2307                tcg_gen_addi_i64(o->in2, o->in2, 1);
2308                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                /* POS can be large enough that a 32-bit shift of 0xff
                   misbehaves; do the shift in 64 bits to match CCM.  */
2309                ccm |= 0xffull << pos;
2310            }
2311            m3 = (m3 << 1) & 0xf;
2312            pos -= 8;
2313        }
2314        break;
2315    }
2316
2317    tcg_gen_movi_i64(tmp, ccm);
2318    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2319    tcg_temp_free_i64(tmp);
2320    return NO_EXIT;
2321}
2322
2323static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2324{
2325    int shift = s->insn->data & 0xff;
2326    int size = s->insn->data >> 8;
2327    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2328    return NO_EXIT;
2329}
2330
2331static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2332{
2333    TCGv_i64 t1;
2334
2335    gen_op_calc_cc(s);
2336    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2337
2338    t1 = tcg_temp_new_i64();
2339    tcg_gen_shli_i64(t1, psw_mask, 20);
2340    tcg_gen_shri_i64(t1, t1, 36);
    /* Keep only the program-mask nibble; the shifts above also leave
       the PSW addressing-mode bits behind, and those must not leak
       into the low bits of R1.  */
    tcg_gen_andi_i64(t1, t1, 0xf000000ull);
2341    tcg_gen_or_i64(o->out, o->out, t1);
2342
2343    tcg_gen_extu_i32_i64(t1, cc_op);
2344    tcg_gen_shli_i64(t1, t1, 28);
2345    tcg_gen_or_i64(o->out, o->out, t1);
2346    tcg_temp_free_i64(t1);
2347    return NO_EXIT;
2348}
2349
2350#ifndef CONFIG_USER_ONLY
2351static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2352{
2353    check_privileged(s);
2354    gen_helper_ipte(cpu_env, o->in1, o->in2);
2355    return NO_EXIT;
2356}
2357
2358static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2359{
2360    check_privileged(s);
2361    gen_helper_iske(o->out, cpu_env, o->in2);
2362    return NO_EXIT;
2363}
2364#endif
2365
2366static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2367{
2368    gen_helper_ldeb(o->out, cpu_env, o->in2);
2369    return NO_EXIT;
2370}
2371
2372static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2373{
2374    gen_helper_ledb(o->out, cpu_env, o->in2);
2375    return NO_EXIT;
2376}
2377
2378static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2379{
2380    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2381    return NO_EXIT;
2382}
2383
2384static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2385{
2386    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2387    return NO_EXIT;
2388}
2389
2390static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2391{
2392    gen_helper_lxdb(o->out, cpu_env, o->in2);
2393    return_low128(o->out2);
2394    return NO_EXIT;
2395}
2396
2397static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2398{
2399    gen_helper_lxeb(o->out, cpu_env, o->in2);
2400    return_low128(o->out2);
2401    return NO_EXIT;
2402}
2403
2404static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2405{
2406    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2407    return NO_EXIT;
2408}
2409
2410static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2411{
2412    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2413    return NO_EXIT;
2414}
2415
2416static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2417{
2418    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2419    return NO_EXIT;
2420}
2421
2422static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2423{
2424    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2425    return NO_EXIT;
2426}
2427
2428static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2429{
2430    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2431    return NO_EXIT;
2432}
2433
2434static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2435{
2436    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2437    return NO_EXIT;
2438}
2439
2440static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2441{
2442    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2443    return NO_EXIT;
2444}
2445
2446static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2447{
2448    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2449    return NO_EXIT;
2450}
2451
2452static ExitStatus op_lat(DisasContext *s, DisasOps *o)
2453{
2454    TCGLabel *lab = gen_new_label();
2455    store_reg32_i64(get_field(s->fields, r1), o->in2);
2456    /* The value is stored even in case of trap. */
2457    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2458    gen_trap(s);
2459    gen_set_label(lab);
2460    return NO_EXIT;
2461}
2462
2463static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2464{
2465    TCGLabel *lab = gen_new_label();
2466    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2467    /* The value is stored even in case of trap. */
2468    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2469    gen_trap(s);
2470    gen_set_label(lab);
2471    return NO_EXIT;
2472}
2473
2474static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2475{
2476    TCGLabel *lab = gen_new_label();
2477    store_reg32h_i64(get_field(s->fields, r1), o->in2);
2478    /* The value is stored even in case of trap. */
2479    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2480    gen_trap(s);
2481    gen_set_label(lab);
2482    return NO_EXIT;
2483}
2484
2485static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2486{
2487    TCGLabel *lab = gen_new_label();
2488    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2489    /* The value is stored even in case of trap. */
2490    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2491    gen_trap(s);
2492    gen_set_label(lab);
2493    return NO_EXIT;
2494}
2495
2496static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2497{
2498    TCGLabel *lab = gen_new_label();
2499    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2500    /* The value is stored even in case of trap. */
2501    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2502    gen_trap(s);
2503    gen_set_label(lab);
2504    return NO_EXIT;
2505}
2506
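/* LOAD ON CONDITION: evaluate the M3 condition and use a movcond to
   select either the new value (IN2, when the condition holds) or the
   old register contents (IN1), so no branch is generated.  */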
2507static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2508{
2509    DisasCompare c;
2510
2511    disas_jcc(s, &c, get_field(s->fields, m3));
2512
2513    if (c.is_64) {
2514        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2515                            o->in2, o->in1);
2516        free_compare(&c);
2517    } else {
2518        TCGv_i32 t32 = tcg_temp_new_i32();
2519        TCGv_i64 t, z;
2520
2521        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2522        free_compare(&c);
2523
2524        t = tcg_temp_new_i64();
2525        tcg_gen_extu_i32_i64(t, t32);
2526        tcg_temp_free_i32(t32);
2527
2528        z = tcg_const_i64(0);
2529        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2530        tcg_temp_free_i64(t);
2531        tcg_temp_free_i64(z);
2532    }
2533
2534    return NO_EXIT;
2535}
2536
2537#ifndef CONFIG_USER_ONLY
2538static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2539{
2540    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2541    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2542    check_privileged(s);
2543    potential_page_fault(s);
2544    gen_helper_lctl(cpu_env, r1, o->in2, r3);
2545    tcg_temp_free_i32(r1);
2546    tcg_temp_free_i32(r3);
2547    return NO_EXIT;
2548}
2549
2550static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2551{
2552    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2553    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2554    check_privileged(s);
2555    potential_page_fault(s);
2556    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2557    tcg_temp_free_i32(r1);
2558    tcg_temp_free_i32(r3);
2559    return NO_EXIT;
2560}

2561static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2562{
2563    check_privileged(s);
2564    potential_page_fault(s);
2565    gen_helper_lra(o->out, cpu_env, o->in2);
2566    set_cc_static(s);
2567    return NO_EXIT;
2568}
2569
2570static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2571{
2572    TCGv_i64 t1, t2;
2573
2574    check_privileged(s);
2575    per_breaking_event(s);
2576
2577    t1 = tcg_temp_new_i64();
2578    t2 = tcg_temp_new_i64();
2579    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2580    tcg_gen_addi_i64(o->in2, o->in2, 4);
2581    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2582    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
2583    tcg_gen_shli_i64(t1, t1, 32);
2584    gen_helper_load_psw(cpu_env, t1, t2);
2585    tcg_temp_free_i64(t1);
2586    tcg_temp_free_i64(t2);
2587    return EXIT_NORETURN;
2588}
2589
2590static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2591{
2592    TCGv_i64 t1, t2;
2593
2594    check_privileged(s);
2595    per_breaking_event(s);
2596
2597    t1 = tcg_temp_new_i64();
2598    t2 = tcg_temp_new_i64();
2599    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2600    tcg_gen_addi_i64(o->in2, o->in2, 8);
2601    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2602    gen_helper_load_psw(cpu_env, t1, t2);
2603    tcg_temp_free_i64(t1);
2604    tcg_temp_free_i64(t2);
2605    return EXIT_NORETURN;
2606}
2607#endif
2608
2609static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2610{
2611    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2612    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2613    potential_page_fault(s);
2614    gen_helper_lam(cpu_env, r1, o->in2, r3);
2615    tcg_temp_free_i32(r1);
2616    tcg_temp_free_i32(r3);
2617    return NO_EXIT;
2618}
2619
2620static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2621{
2622    int r1 = get_field(s->fields, r1);
2623    int r3 = get_field(s->fields, r3);
2624    TCGv_i64 t1, t2;
2625
2626    /* Only one register to read. */
2627    t1 = tcg_temp_new_i64();
2628    if (unlikely(r1 == r3)) {
2629        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2630        store_reg32_i64(r1, t1);
2631        tcg_temp_free(t1);
2632        return NO_EXIT;
2633    }
2634
2635    /* First load the values of the first and last registers to trigger
2636       possible page faults. */
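    /* Between them, the first and last words touch every page the
       operand can span (it covers at most two), so once both loads
       succeed the remaining accesses cannot fault.  */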
2637    t2 = tcg_temp_new_i64();
2638    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2639    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2640    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2641    store_reg32_i64(r1, t1);
2642    store_reg32_i64(r3, t2);
2643
2644    /* Only two registers to read. */
2645    if (((r1 + 1) & 15) == r3) {
2646        tcg_temp_free(t2);
2647        tcg_temp_free(t1);
2648        return NO_EXIT;
2649    }
2650
2651    /* Then load the remaining registers. Page fault can't occur. */
2652    r3 = (r3 - 1) & 15;
2653    tcg_gen_movi_i64(t2, 4);
2654    while (r1 != r3) {
2655        r1 = (r1 + 1) & 15;
2656        tcg_gen_add_i64(o->in2, o->in2, t2);
2657        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2658        store_reg32_i64(r1, t1);
2659    }
2660    tcg_temp_free(t2);
2661    tcg_temp_free(t1);
2662
2663    return NO_EXIT;
2664}
2665
2666static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2667{
2668    int r1 = get_field(s->fields, r1);
2669    int r3 = get_field(s->fields, r3);
2670    TCGv_i64 t1, t2;
2671
2672    /* Only one register to read. */
2673    t1 = tcg_temp_new_i64();
2674    if (unlikely(r1 == r3)) {
2675        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2676        store_reg32h_i64(r1, t1);
2677        tcg_temp_free(t1);
2678        return NO_EXIT;
2679    }
2680
2681    /* First load the values of the first and last registers to trigger
2682       possible page faults. */
2683    t2 = tcg_temp_new_i64();
2684    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2685    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2686    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2687    store_reg32h_i64(r1, t1);
2688    store_reg32h_i64(r3, t2);
2689
2690    /* Only two registers to read. */
2691    if (((r1 + 1) & 15) == r3) {
2692        tcg_temp_free(t2);
2693        tcg_temp_free(t1);
2694        return NO_EXIT;
2695    }
2696
2697    /* Then load the remaining registers. Page fault can't occur. */
2698    r3 = (r3 - 1) & 15;
2699    tcg_gen_movi_i64(t2, 4);
2700    while (r1 != r3) {
2701        r1 = (r1 + 1) & 15;
2702        tcg_gen_add_i64(o->in2, o->in2, t2);
2703        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2704        store_reg32h_i64(r1, t1);
2705    }
2706    tcg_temp_free(t2);
2707    tcg_temp_free(t1);
2708
2709    return NO_EXIT;
2710}
2711
2712static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2713{
2714    int r1 = get_field(s->fields, r1);
2715    int r3 = get_field(s->fields, r3);
2716    TCGv_i64 t1, t2;
2717
2718    /* Only one register to read. */
2719    if (unlikely(r1 == r3)) {
2720        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2721        return NO_EXIT;
2722    }
2723
2724    /* First load the values of the first and last registers to trigger
2725       possible page faults. */
2726    t1 = tcg_temp_new_i64();
2727    t2 = tcg_temp_new_i64();
2728    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2729    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2730    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2731    tcg_gen_mov_i64(regs[r1], t1);
2732    tcg_temp_free(t2);
2733
2734    /* Only two registers to read. */
2735    if (((r1 + 1) & 15) == r3) {
2736        tcg_temp_free(t1);
2737        return NO_EXIT;
2738    }
2739
2740    /* Then load the remaining registers. Page fault can't occur. */
2741    r3 = (r3 - 1) & 15;
2742    tcg_gen_movi_i64(t1, 8);
2743    while (r1 != r3) {
2744        r1 = (r1 + 1) & 15;
2745        tcg_gen_add_i64(o->in2, o->in2, t1);
2746        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2747    }
2748    tcg_temp_free(t1);
2749
2750    return NO_EXIT;
2751}
2752
2753#ifndef CONFIG_USER_ONLY
2754static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2755{
2756    check_privileged(s);
2757    potential_page_fault(s);
2758    gen_helper_lura(o->out, cpu_env, o->in2);
2759    return NO_EXIT;
2760}
2761
2762static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2763{
2764    check_privileged(s);
2765    potential_page_fault(s);
2766    gen_helper_lurag(o->out, cpu_env, o->in2);
2767    return NO_EXIT;
2768}
2769#endif
2770
2771static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2772{
2773    o->out = o->in2;
2774    o->g_out = o->g_in2;
2775    TCGV_UNUSED_I64(o->in2);
2776    o->g_in2 = false;
2777    return NO_EXIT;
2778}
2779
2780static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
2781{
2782    int b2 = get_field(s->fields, b2);
2783    TCGv ar1 = tcg_temp_new_i64();
2784
2785    o->out = o->in2;
2786    o->g_out = o->g_in2;
2787    TCGV_UNUSED_I64(o->in2);
2788    o->g_in2 = false;
2789
2790    switch (s->tb->flags & FLAG_MASK_ASC) {
2791    case PSW_ASC_PRIMARY >> 32:
2792        tcg_gen_movi_i64(ar1, 0);
2793        break;
2794    case PSW_ASC_ACCREG >> 32:
2795        tcg_gen_movi_i64(ar1, 1);
2796        break;
2797    case PSW_ASC_SECONDARY >> 32:
2798        if (b2) {
2799            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
2800        } else {
2801            tcg_gen_movi_i64(ar1, 0);
2802        }
2803        break;
2804    case PSW_ASC_HOME >> 32:
2805        tcg_gen_movi_i64(ar1, 2);
2806        break;
2807    }
2808
2809    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
2810    tcg_temp_free_i64(ar1);
2811
2812    return NO_EXIT;
2813}
2814
2815static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2816{
2817    o->out = o->in1;
2818    o->out2 = o->in2;
2819    o->g_out = o->g_in1;
2820    o->g_out2 = o->g_in2;
2821    TCGV_UNUSED_I64(o->in1);
2822    TCGV_UNUSED_I64(o->in2);
2823    o->g_in1 = o->g_in2 = false;
2824    return NO_EXIT;
2825}
2826
2827static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2828{
2829    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2830    potential_page_fault(s);
2831    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2832    tcg_temp_free_i32(l);
2833    return NO_EXIT;
2834}
2835
2836static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2837{
2838    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2839    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2840    potential_page_fault(s);
2841    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2842    tcg_temp_free_i32(r1);
2843    tcg_temp_free_i32(r2);
2844    set_cc_static(s);
2845    return NO_EXIT;
2846}
2847
2848static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2849{
2850    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2851    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2852    potential_page_fault(s);
2853    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2854    tcg_temp_free_i32(r1);
2855    tcg_temp_free_i32(r3);
2856    set_cc_static(s);
2857    return NO_EXIT;
2858}
2859
2860#ifndef CONFIG_USER_ONLY
2861static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2862{
2863    int r1 = get_field(s->fields, l1);
2864    check_privileged(s);
2865    potential_page_fault(s);
2866    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2867    set_cc_static(s);
2868    return NO_EXIT;
2869}
2870
2871static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2872{
2873    int r1 = get_field(s->fields, l1);
2874    check_privileged(s);
2875    potential_page_fault(s);
2876    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2877    set_cc_static(s);
2878    return NO_EXIT;
2879}
2880#endif
2881
2882static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2883{
2884    potential_page_fault(s);
2885    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
2886    set_cc_static(s);
2887    return NO_EXIT;
2888}
2889
2890static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2891{
2892    potential_page_fault(s);
2893    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2894    set_cc_static(s);
2895    return_low128(o->in2);
2896    return NO_EXIT;
2897}
2898
2899static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2900{
2901    tcg_gen_mul_i64(o->out, o->in1, o->in2);
2902    return NO_EXIT;
2903}
2904
2905static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2906{
2907    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
2908    return NO_EXIT;
2909}
2910
2911static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2912{
2913    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2914    return NO_EXIT;
2915}
2916
2917static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2918{
2919    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2920    return NO_EXIT;
2921}
2922
2923static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2924{
2925    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2926    return NO_EXIT;
2927}
2928
2929static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2930{
2931    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2932    return_low128(o->out2);
2933    return NO_EXIT;
2934}
2935
2936static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2937{
2938    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2939    return_low128(o->out2);
2940    return NO_EXIT;
2941}
2942
2943static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2944{
2945    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2946    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2947    tcg_temp_free_i64(r3);
2948    return NO_EXIT;
2949}
2950
2951static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2952{
2953    int r3 = get_field(s->fields, r3);
2954    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2955    return NO_EXIT;
2956}
2957
2958static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2959{
2960    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2961    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2962    tcg_temp_free_i64(r3);
2963    return NO_EXIT;
2964}
2965
2966static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2967{
2968    int r3 = get_field(s->fields, r3);
2969    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2970    return NO_EXIT;
2971}
2972
2973static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2974{
2975    TCGv_i64 z, n;
2976    z = tcg_const_i64(0);
2977    n = tcg_temp_new_i64();
2978    tcg_gen_neg_i64(n, o->in2);
2979    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
2980    tcg_temp_free_i64(n);
2981    tcg_temp_free_i64(z);
2982    return NO_EXIT;
2983}
2984
2985static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2986{
2987    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2988    return NO_EXIT;
2989}
2990
2991static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2992{
2993    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2994    return NO_EXIT;
2995}
2996
2997static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2998{
2999    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3000    tcg_gen_mov_i64(o->out2, o->in2);
3001    return NO_EXIT;
3002}
3003
3004static ExitStatus op_nc(DisasContext *s, DisasOps *o)
3005{
3006    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3007    potential_page_fault(s);
3008    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3009    tcg_temp_free_i32(l);
3010    set_cc_static(s);
3011    return NO_EXIT;
3012}
3013
3014static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3015{
3016    tcg_gen_neg_i64(o->out, o->in2);
3017    return NO_EXIT;
3018}
3019
3020static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3021{
3022    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3023    return NO_EXIT;
3024}
3025
3026static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3027{
3028    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3029    return NO_EXIT;
3030}
3031
3032static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3033{
3034    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3035    tcg_gen_mov_i64(o->out2, o->in2);
3036    return NO_EXIT;
3037}
3038
3039static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3040{
3041    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3042    potential_page_fault(s);
3043    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3044    tcg_temp_free_i32(l);
3045    set_cc_static(s);
3046    return NO_EXIT;
3047}
3048
3049static ExitStatus op_or(DisasContext *s, DisasOps *o)
3050{
3051    tcg_gen_or_i64(o->out, o->in1, o->in2);
3052    return NO_EXIT;
3053}
3054
3055static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3056{
3057    int shift = s->insn->data & 0xff;
3058    int size = s->insn->data >> 8;
3059    uint64_t mask = ((1ull << size) - 1) << shift;
3060
3061    assert(!o->g_in2);
3062    tcg_gen_shli_i64(o->in2, o->in2, shift);
3063    tcg_gen_or_i64(o->out, o->in1, o->in2);
3064
3065    /* Produce the CC from only the bits manipulated.  */
3066    tcg_gen_andi_i64(cc_dst, o->out, mask);
3067    set_cc_nz_u64(s, cc_dst);
3068    return NO_EXIT;
3069}
3070
3071static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3072{
3073    gen_helper_popcnt(o->out, o->in2);
3074    return NO_EXIT;
3075}
3076
3077#ifndef CONFIG_USER_ONLY
3078static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3079{
3080    check_privileged(s);
3081    gen_helper_ptlb(cpu_env);
3082    return NO_EXIT;
3083}
3084#endif
3085
3086static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3087{
3088    int i3 = get_field(s->fields, i3);
3089    int i4 = get_field(s->fields, i4);
3090    int i5 = get_field(s->fields, i5);
3091    int do_zero = i4 & 0x80;
3092    uint64_t mask, imask, pmask;
3093    int pos, len, rot;
3094
3095    /* Adjust the arguments for the specific insn.  */
3096    switch (s->fields->op2) {
3097    case 0x55: /* risbg */
3098        i3 &= 63;
3099        i4 &= 63;
3100        pmask = ~0;
3101        break;
3102    case 0x5d: /* risbhg */
3103        i3 &= 31;
3104        i4 &= 31;
3105        pmask = 0xffffffff00000000ull;
3106        break;
3107    case 0x51: /* risblg */
3108        i3 &= 31;
3109        i4 &= 31;
3110        pmask = 0x00000000ffffffffull;
3111        break;
3112    default:
3113        abort();
3114    }
3115
3116    /* MASK is the set of bits to be inserted from R2.
3117       Take care for I3/I4 wraparound.  */
3118    mask = pmask >> i3;
3119    if (i3 <= i4) {
3120        mask ^= pmask >> i4 >> 1;
3121    } else {
3122        mask |= ~(pmask >> i4 >> 1);
3123    }
3124    mask &= pmask;
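    /* E.g. for RISBG with I3 = 40 and I4 = 47:
       mask = (~0ull >> 40) ^ (~0ull >> 47 >> 1) = 0x0000000000ff0000,
       i.e. bits 40-47 of the rotated R2 in the PoO's numbering.  */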
3125
3126    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3127       insns, we need to keep the other half of the register.  */
3128    imask = ~mask | ~pmask;
3129    if (do_zero) {
3130        if (s->fields->op2 == 0x55) {
3131            imask = 0;
3132        } else {
3133            imask = ~pmask;
3134        }
3135    }
3136
3137    /* In some cases we can implement this with deposit, which can be more
3138       efficient on some hosts.  */
3139    if (~mask == imask && i3 <= i4) {
3140        if (s->fields->op2 == 0x5d) {
3141            i3 += 32, i4 += 32;
3142        }
3143        /* Note that we rotate the bits to be inserted to the lsb, not to
3144           the position as described in the PoO.  */
3145        len = i4 - i3 + 1;
3146        pos = 63 - i4;
3147        rot = (i5 - pos) & 63;
3148    } else {
3149        pos = len = -1;
3150        rot = i5 & 63;
3151    }
3152
3153    /* Rotate the input as necessary.  */
3154    tcg_gen_rotli_i64(o->in2, o->in2, rot);
3155
3156    /* Insert the selected bits into the output.  */
3157    if (pos >= 0) {
3158        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3159    } else if (imask == 0) {
3160        tcg_gen_andi_i64(o->out, o->in2, mask);
3161    } else {
3162        tcg_gen_andi_i64(o->in2, o->in2, mask);
3163        tcg_gen_andi_i64(o->out, o->out, imask);
3164        tcg_gen_or_i64(o->out, o->out, o->in2);
3165    }
3166    return NO_EXIT;
3167}
3168
3169static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3170{
3171    int i3 = get_field(s->fields, i3);
3172    int i4 = get_field(s->fields, i4);
3173    int i5 = get_field(s->fields, i5);
3174    uint64_t mask;
3175
3176    /* If this is a test-only form, arrange to discard the result.  */
3177    if (i3 & 0x80) {
3178        o->out = tcg_temp_new_i64();
3179        o->g_out = false;
3180    }
3181
3182    i3 &= 63;
3183    i4 &= 63;
3184    i5 &= 63;
3185
3186    /* MASK is the set of bits to be operated on from R2.
3187       Take care for I3/I4 wraparound.  */
3188    mask = ~0ull >> i3;
3189    if (i3 <= i4) {
3190        mask ^= ~0ull >> i4 >> 1;
3191    } else {
3192        mask |= ~(~0ull >> i4 >> 1);
3193    }
3194
3195    /* Rotate the input as necessary.  */
3196    tcg_gen_rotli_i64(o->in2, o->in2, i5);
3197
3198    /* Operate.  */
3199    switch (s->fields->op2) {
3200    case 0x55: /* AND */
3201        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3202        tcg_gen_and_i64(o->out, o->out, o->in2);
3203        break;
3204    case 0x56: /* OR */
3205        tcg_gen_andi_i64(o->in2, o->in2, mask);
3206        tcg_gen_or_i64(o->out, o->out, o->in2);
3207        break;
3208    case 0x57: /* XOR */
3209        tcg_gen_andi_i64(o->in2, o->in2, mask);
3210        tcg_gen_xor_i64(o->out, o->out, o->in2);
3211        break;
3212    default:
3213        abort();
3214    }
3215
3216    /* Set the CC.  */
3217    tcg_gen_andi_i64(cc_dst, o->out, mask);
3218    set_cc_nz_u64(s, cc_dst);
3219    return NO_EXIT;
3220}
3221
3222static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3223{
3224    tcg_gen_bswap16_i64(o->out, o->in2);
3225    return NO_EXIT;
3226}
3227
3228static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3229{
3230    tcg_gen_bswap32_i64(o->out, o->in2);
3231    return NO_EXIT;
3232}
3233
3234static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3235{
3236    tcg_gen_bswap64_i64(o->out, o->in2);
3237    return NO_EXIT;
3238}
3239
3240static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3241{
3242    TCGv_i32 t1 = tcg_temp_new_i32();
3243    TCGv_i32 t2 = tcg_temp_new_i32();
3244    TCGv_i32 to = tcg_temp_new_i32();
3245    tcg_gen_extrl_i64_i32(t1, o->in1);
3246    tcg_gen_extrl_i64_i32(t2, o->in2);
3247    tcg_gen_rotl_i32(to, t1, t2);
3248    tcg_gen_extu_i32_i64(o->out, to);
3249    tcg_temp_free_i32(t1);
3250    tcg_temp_free_i32(t2);
3251    tcg_temp_free_i32(to);
3252    return NO_EXIT;
3253}
3254
3255static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3256{
3257    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3258    return NO_EXIT;
3259}
3260
3261#ifndef CONFIG_USER_ONLY
3262static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3263{
3264    check_privileged(s);
3265    gen_helper_rrbe(cc_op, cpu_env, o->in2);
3266    set_cc_static(s);
3267    return NO_EXIT;
3268}
3269
3270static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3271{
3272    check_privileged(s);
3273    gen_helper_sacf(cpu_env, o->in2);
3274    /* Addressing mode has changed, so end the block.  */
3275    return EXIT_PC_STALE;
3276}
3277#endif
3278
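/* SET ADDRESSING MODE: insn->data is 0, 1 or 3 for SAM24, SAM31 and
   SAM64 (3 so that the deposit below sets both addressing-mode bits).
   The same value also selects the address mask checked against the
   current PC.  */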
3279static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3280{
3281    int sam = s->insn->data;
3282    TCGv_i64 tsam;
3283    uint64_t mask;
3284
3285    switch (sam) {
3286    case 0:
3287        mask = 0xffffff;
3288        break;
3289    case 1:
3290        mask = 0x7fffffff;
3291        break;
3292    default:
3293        mask = -1;
3294        break;
3295    }
3296
3297    /* Bizarre but true, we check the address of the current insn for the
3298       specification exception, not the next to be executed.  Thus the PoO
3299       documents that Bad Things Happen two bytes before the end.  */
3300    if (s->pc & ~mask) {
3301        gen_program_exception(s, PGM_SPECIFICATION);
3302        return EXIT_NORETURN;
3303    }
3304    s->next_pc &= mask;
3305
3306    tsam = tcg_const_i64(sam);
3307    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3308    tcg_temp_free_i64(tsam);
3309
3310    /* Always exit the TB, since we (may have) changed execution mode.  */
3311    return EXIT_PC_STALE;
3312}
3313
3314static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3315{
3316    int r1 = get_field(s->fields, r1);
3317    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3318    return NO_EXIT;
3319}
3320
3321static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3322{
3323    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3324    return NO_EXIT;
3325}
3326
3327static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3328{
3329    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3330    return NO_EXIT;
3331}
3332
3333static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3334{
3335    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3336    return_low128(o->out2);
3337    return NO_EXIT;
3338}
3339
3340static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3341{
3342    gen_helper_sqeb(o->out, cpu_env, o->in2);
3343    return NO_EXIT;
3344}
3345
3346static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3347{
3348    gen_helper_sqdb(o->out, cpu_env, o->in2);
3349    return NO_EXIT;
3350}
3351
3352static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3353{
3354    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3355    return_low128(o->out2);
3356    return NO_EXIT;
3357}
3358
3359#ifndef CONFIG_USER_ONLY
3360static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3361{
3362    check_privileged(s);
3363    potential_page_fault(s);
3364    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3365    set_cc_static(s);
3366    return NO_EXIT;
3367}
3368
3369static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3370{
3371    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3372    check_privileged(s);
3373    potential_page_fault(s);
3374    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
3375    tcg_temp_free_i32(r1);
3376    return NO_EXIT;
3377}
3378#endif
3379
3380static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3381{
3382    DisasCompare c;
3383    TCGv_i64 a;
3384    TCGLabel *lab;
3385    int r1;
3386
3387    disas_jcc(s, &c, get_field(s->fields, m3));
3388
3389    /* We want to store when the condition is fulfilled, so branch
3390       out when it's not.  */
3391    c.cond = tcg_invert_cond(c.cond);
3392
3393    lab = gen_new_label();
3394    if (c.is_64) {
3395        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3396    } else {
3397        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3398    }
3399    free_compare(&c);
3400
3401    r1 = get_field(s->fields, r1);
3402    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3403    if (s->insn->data) {
3404        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3405    } else {
3406        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3407    }
3408    tcg_temp_free_i64(a);
3409
3410    gen_set_label(lab);
3411    return NO_EXIT;
3412}
3413
3414static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3415{
3416    uint64_t sign = 1ull << s->insn->data;
3417    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3418    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3419    tcg_gen_shl_i64(o->out, o->in1, o->in2);
3420    /* The arithmetic left shift is curious in that it does not affect
3421       the sign bit.  Copy that over from the source unchanged.  */
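    /* E.g. for the 32-bit form (insn->data == 31), 0x80000001 shifted
       left by one becomes 0x80000002: bit 31 is copied from the source
       while the remaining bits shift normally.  */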
3422    tcg_gen_andi_i64(o->out, o->out, ~sign);
3423    tcg_gen_andi_i64(o->in1, o->in1, sign);
3424    tcg_gen_or_i64(o->out, o->out, o->in1);
3425    return NO_EXIT;
3426}
3427
3428static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3429{
3430    tcg_gen_shl_i64(o->out, o->in1, o->in2);
3431    return NO_EXIT;
3432}
3433
3434static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3435{
3436    tcg_gen_sar_i64(o->out, o->in1, o->in2);
3437    return NO_EXIT;
3438}
3439
3440static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3441{
3442    tcg_gen_shr_i64(o->out, o->in1, o->in2);
3443    return NO_EXIT;
3444}
3445
3446static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3447{
3448    gen_helper_sfpc(cpu_env, o->in2);
3449    return NO_EXIT;
3450}
3451
3452static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3453{
3454    gen_helper_sfas(cpu_env, o->in2);
3455    return NO_EXIT;
3456}
3457
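/* SET ROUNDING MODE: SRNM and SRNMB update the binary rounding-mode
   field in the low bits of the FPC, SRNMT the decimal one at bit
   offset 4; POS and LEN below pick the field into which the low bits
   of the D2(B2) address are inserted.  */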
3458static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3459{
3460    int b2 = get_field(s->fields, b2);
3461    int d2 = get_field(s->fields, d2);
3462    TCGv_i64 t1 = tcg_temp_new_i64();
3463    TCGv_i64 t2 = tcg_temp_new_i64();
3464    int mask, pos, len;
3465
3466    switch (s->fields->op2) {
3467    case 0x99: /* SRNM */
3468        pos = 0, len = 2;
3469        break;
3470    case 0xb8: /* SRNMB */
3471        pos = 0, len = 3;
3472        break;
3473    case 0xb9: /* SRNMT */
3474        pos = 4, len = 3;
3475        break;
3476    default:
3477        tcg_abort();
3478    }
3479    mask = (1 << len) - 1;
3480
3481    /* Insert the value into the appropriate field of the FPC.  */
3482    if (b2 == 0) {
3483        tcg_gen_movi_i64(t1, d2 & mask);
3484    } else {
3485        tcg_gen_addi_i64(t1, regs[b2], d2);
3486        tcg_gen_andi_i64(t1, t1, mask);
3487    }
3488    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3489    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3490    tcg_temp_free_i64(t1);
3491
3492    /* Then install the new FPC to set the rounding mode in fpu_status.  */
3493    gen_helper_sfpc(cpu_env, t2);
3494    tcg_temp_free_i64(t2);
3495    return NO_EXIT;
3496}
3497
3498#ifndef CONFIG_USER_ONLY
3499static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3500{
3501    check_privileged(s);
3502    tcg_gen_shri_i64(o->in2, o->in2, 4);
3503    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
3504    return NO_EXIT;
3505}
3506
3507static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3508{
3509    check_privileged(s);
3510    gen_helper_sske(cpu_env, o->in1, o->in2);
3511    return NO_EXIT;
3512}
3513
3514static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3515{
3516    check_privileged(s);
3517    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3518    return NO_EXIT;
3519}
3520
3521static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3522{
3523    check_privileged(s);
3524    /* ??? Surely cpu address != cpu number.  In any case the previous
3525       version of this stored more than the required half-word, so it
3526       is unlikely this has ever been tested.  */
3527    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3528    return NO_EXIT;
3529}
3530
3531static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3532{
3533    gen_helper_stck(o->out, cpu_env);
3534    /* ??? We don't implement clock states.  */
3535    gen_op_movi_cc(s, 0);
3536    return NO_EXIT;
3537}
3538
3539static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3540{
3541    TCGv_i64 c1 = tcg_temp_new_i64();
3542    TCGv_i64 c2 = tcg_temp_new_i64();
3543    gen_helper_stck(c1, cpu_env);
3544    /* Shift the 64-bit value into its place as a zero-extended
3545       104-bit value.  Note that "bit positions 64-103 are always
3546       non-zero so that they compare differently to STCK"; we set
3547       the least significant bit to 1.  */
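    /* Concretely, the 16-byte result equals TOD << 56 with field bit
       111 (the lsb of the 104-bit clock) forced to 1; the final 16
       bits, the programmable field, are stored as zero.  */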
3548    tcg_gen_shli_i64(c2, c1, 56);
3549    tcg_gen_shri_i64(c1, c1, 8);
3550    tcg_gen_ori_i64(c2, c2, 0x10000);
3551    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3552    tcg_gen_addi_i64(o->in2, o->in2, 8);
3553    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3554    tcg_temp_free_i64(c1);
3555    tcg_temp_free_i64(c2);
3556    /* ??? We don't implement clock states.  */
3557    gen_op_movi_cc(s, 0);
3558    return NO_EXIT;
3559}
3560
3561static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3562{
3563    check_privileged(s);
3564    gen_helper_sckc(cpu_env, o->in2);
3565    return NO_EXIT;
3566}
3567
3568static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3569{
3570    check_privileged(s);
3571    gen_helper_stckc(o->out, cpu_env);
3572    return NO_EXIT;
3573}
3574
3575static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3576{
3577    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3578    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3579    check_privileged(s);
3580    potential_page_fault(s);
3581    gen_helper_stctg(cpu_env, r1, o->in2, r3);
3582    tcg_temp_free_i32(r1);
3583    tcg_temp_free_i32(r3);
3584    return NO_EXIT;
3585}
3586
3587static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3588{
3589    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3590    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3591    check_privileged(s);
3592    potential_page_fault(s);
3593    gen_helper_stctl(cpu_env, r1, o->in2, r3);
3594    tcg_temp_free_i32(r1);
3595    tcg_temp_free_i32(r3);
3596    return NO_EXIT;
3597}
3598
3599static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3600{
3601    TCGv_i64 t1 = tcg_temp_new_i64();
3602
3603    check_privileged(s);
3604    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3605    tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
3606    tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
3607    tcg_temp_free_i64(t1);
3608
3609    return NO_EXIT;
3610}
3611
3612static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3613{
3614    check_privileged(s);
3615    gen_helper_spt(cpu_env, o->in2);
3616    return NO_EXIT;
3617}
3618
3619static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3620{
3621    TCGv_i64 f, a;
3622    /* We really ought to have more complete indication of facilities
3623       that we implement.  Address this when STFLE is implemented.  */
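    /* STFL stores its single word of facility bits at real location
       200, hence the fixed address below.  */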
3624    check_privileged(s);
3625    f = tcg_const_i64(0xc0000000);
3626    a = tcg_const_i64(200);
3627    tcg_gen_qemu_st32(f, a, get_mem_index(s));
3628    tcg_temp_free_i64(f);
3629    tcg_temp_free_i64(a);
3630    return NO_EXIT;
3631}
3632
3633static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3634{
3635    check_privileged(s);
3636    gen_helper_stpt(o->out, cpu_env);
3637    return NO_EXIT;
3638}
3639
3640static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3641{
3642    check_privileged(s);
3643    potential_page_fault(s);
3644    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3645    set_cc_static(s);
3646    return NO_EXIT;
3647}
3648
3649static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3650{
3651    check_privileged(s);
3652    gen_helper_spx(cpu_env, o->in2);
3653    return NO_EXIT;
3654}
3655
3656static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
3657{
3658    check_privileged(s);
3659    potential_page_fault(s);
3660    gen_helper_xsch(cpu_env, regs[1]);
3661    set_cc_static(s);
3662    return NO_EXIT;
3663}
3664
3665static ExitStatus op_csch(DisasContext *s, DisasOps *o)
3666{
3667    check_privileged(s);
3668    potential_page_fault(s);
3669    gen_helper_csch(cpu_env, regs[1]);
3670    set_cc_static(s);
3671    return NO_EXIT;
3672}
3673
3674static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
3675{
3676    check_privileged(s);
3677    potential_page_fault(s);
3678    gen_helper_hsch(cpu_env, regs[1]);
3679    set_cc_static(s);
3680    return NO_EXIT;
3681}
3682
3683static ExitStatus op_msch(DisasContext *s, DisasOps *o)
3684{
3685    check_privileged(s);
3686    potential_page_fault(s);
3687    gen_helper_msch(cpu_env, regs[1], o->in2);
3688    set_cc_static(s);
3689    return NO_EXIT;
3690}
3691
3692static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
3693{
3694    check_privileged(s);
3695    potential_page_fault(s);
3696    gen_helper_rchp(cpu_env, regs[1]);
3697    set_cc_static(s);
3698    return NO_EXIT;
3699}
3700
3701static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
3702{
3703    check_privileged(s);
3704    potential_page_fault(s);
3705    gen_helper_rsch(cpu_env, regs[1]);
3706    set_cc_static(s);
3707    return NO_EXIT;
3708}
3709
3710static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
3711{
3712    check_privileged(s);
3713    potential_page_fault(s);
3714    gen_helper_ssch(cpu_env, regs[1], o->in2);
3715    set_cc_static(s);
3716    return NO_EXIT;
3717}
3718
3719static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
3720{
3721    check_privileged(s);
3722    potential_page_fault(s);
3723    gen_helper_stsch(cpu_env, regs[1], o->in2);
3724    set_cc_static(s);
3725    return NO_EXIT;
3726}
3727
3728static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
3729{
3730    check_privileged(s);
3731    potential_page_fault(s);
3732    gen_helper_tsch(cpu_env, regs[1], o->in2);
3733    set_cc_static(s);
3734    return NO_EXIT;
3735}
3736
3737static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
3738{
3739    check_privileged(s);
3740    potential_page_fault(s);
3741    gen_helper_chsc(cpu_env, o->in2);
3742    set_cc_static(s);
3743    return NO_EXIT;
3744}
3745
3746static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3747{
3748    check_privileged(s);
3749    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3750    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3751    return NO_EXIT;
3752}
3753
3754static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3755{
3756    uint64_t i2 = get_field(s->fields, i2);
3757    TCGv_i64 t;
3758
3759    check_privileged(s);
3760
3761    /* It is important to do what the instruction name says: STORE THEN.
3762       If we let the output hook perform the store, then if we fault and
3763       restart, we'll have the wrong SYSTEM MASK in place.  */
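        /* Opcode 0xac is STNSM (AND the mask), 0xad is STOSM (OR the mask).
           A typical use is "STNSM addr,X'FC'", which saves the current mask
           and clears the I/O and external interrupt bits in one step.  */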
3764    t = tcg_temp_new_i64();
3765    tcg_gen_shri_i64(t, psw_mask, 56);
3766    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3767    tcg_temp_free_i64(t);
3768
3769    if (s->fields->op == 0xac) {
3770        tcg_gen_andi_i64(psw_mask, psw_mask,
3771                         (i2 << 56) | 0x00ffffffffffffffull);
3772    } else {
3773        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3774    }
3775    return NO_EXIT;
3776}
3777
3778static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3779{
3780    check_privileged(s);
3781    potential_page_fault(s);
3782    gen_helper_stura(cpu_env, o->in2, o->in1);
3783    return NO_EXIT;
3784}
3785
3786static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
3787{
3788    check_privileged(s);
3789    potential_page_fault(s);
3790    gen_helper_sturg(cpu_env, o->in2, o->in1);
3791    return NO_EXIT;
3792}
3793#endif
3794
3795static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3796{
3797    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3798    return NO_EXIT;
3799}
3800
3801static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3802{
3803    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3804    return NO_EXIT;
3805}
3806
3807static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3808{
3809    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3810    return NO_EXIT;
3811}
3812
3813static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3814{
3815    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3816    return NO_EXIT;
3817}
3818
3819static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3820{
3821    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3822    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3823    potential_page_fault(s);
3824    gen_helper_stam(cpu_env, r1, o->in2, r3);
3825    tcg_temp_free_i32(r1);
3826    tcg_temp_free_i32(r3);
3827    return NO_EXIT;
3828}
3829
3830static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3831{
3832    int m3 = get_field(s->fields, m3);
3833    int pos, base = s->insn->data;
3834    TCGv_i64 tmp = tcg_temp_new_i64();
3835
3836    pos = base + ctz32(m3) * 8;
3837    switch (m3) {
3838    case 0xf:
3839        /* Effectively a 32-bit store.  */
3840        tcg_gen_shri_i64(tmp, o->in1, pos);
3841        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3842        break;
3843
3844    case 0xc:
3845    case 0x6:
3846    case 0x3:
3847        /* Effectively a 16-bit store.  */
3848        tcg_gen_shri_i64(tmp, o->in1, pos);
3849        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3850        break;
3851
3852    case 0x8:
3853    case 0x4:
3854    case 0x2:
3855    case 0x1:
3856        /* Effectively an 8-bit store.  */
3857        tcg_gen_shri_i64(tmp, o->in1, pos);
3858        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3859        break;
3860
3861    default:
3862        /* This is going to be a sequence of shifts and stores.  */
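            /* For example, m3 = 0xa (binary 1010) stores the first and third
               bytes (counting from the most-significant end) of the four-byte
               source field to two consecutive byte addresses, skipping the
               other two.  */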
3863        pos = base + 32 - 8;
3864        while (m3) {
3865            if (m3 & 0x8) {
3866                tcg_gen_shri_i64(tmp, o->in1, pos);
3867                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3868                tcg_gen_addi_i64(o->in2, o->in2, 1);
3869            }
3870            m3 = (m3 << 1) & 0xf;
3871            pos -= 8;
3872        }
3873        break;
3874    }
3875    tcg_temp_free_i64(tmp);
3876    return NO_EXIT;
3877}
3878
3879static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3880{
3881    int r1 = get_field(s->fields, r1);
3882    int r3 = get_field(s->fields, r3);
3883    int size = s->insn->data;
3884    TCGv_i64 tsize = tcg_const_i64(size);
3885
3886    while (1) {
3887        if (size == 8) {
3888            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3889        } else {
3890            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3891        }
3892        if (r1 == r3) {
3893            break;
3894        }
3895        tcg_gen_add_i64(o->in2, o->in2, tsize);
3896        r1 = (r1 + 1) & 15;
3897    }
3898
3899    tcg_temp_free_i64(tsize);
3900    return NO_EXIT;
3901}
3902
3903static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3904{
3905    int r1 = get_field(s->fields, r1);
3906    int r3 = get_field(s->fields, r3);
3907    TCGv_i64 t = tcg_temp_new_i64();
3908    TCGv_i64 t4 = tcg_const_i64(4);
3909    TCGv_i64 t32 = tcg_const_i64(32);
3910
3911    while (1) {
3912        tcg_gen_shl_i64(t, regs[r1], t32);
3913        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3914        if (r1 == r3) {
3915            break;
3916        }
3917        tcg_gen_add_i64(o->in2, o->in2, t4);
3918        r1 = (r1 + 1) & 15;
3919    }
3920
3921    tcg_temp_free_i64(t);
3922    tcg_temp_free_i64(t4);
3923    tcg_temp_free_i64(t32);
3924    return NO_EXIT;
3925}
3926
3927static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3928{
3929    potential_page_fault(s);
3930    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3931    set_cc_static(s);
3932    return_low128(o->in2);
3933    return NO_EXIT;
3934}
3935
3936static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3937{
3938    tcg_gen_sub_i64(o->out, o->in1, o->in2);
3939    return NO_EXIT;
3940}
3941
3942static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3943{
3944    DisasCompare cmp;
3945    TCGv_i64 borrow;
3946
3947    tcg_gen_sub_i64(o->out, o->in1, o->in2);
3948
3949    /* The !borrow flag is the msb of CC.  Since we want the inverse of
3950       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
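        /* Recall the branch-mask convention used by disas_jcc: mask bits
           8/4/2/1 select CC 0/1/2/3, so 8 | 4 computes "CC is 0 or 1",
           i.e. a borrow occurred.  */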
3951    disas_jcc(s, &cmp, 8 | 4);
3952    borrow = tcg_temp_new_i64();
3953    if (cmp.is_64) {
3954        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
3955    } else {
3956        TCGv_i32 t = tcg_temp_new_i32();
3957        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
3958        tcg_gen_extu_i32_i64(borrow, t);
3959        tcg_temp_free_i32(t);
3960    }
3961    free_compare(&cmp);
3962
3963    tcg_gen_sub_i64(o->out, o->out, borrow);
3964    tcg_temp_free_i64(borrow);
3965    return NO_EXIT;
3966}
3967
3968static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3969{
3970    TCGv_i32 t;
3971
3972    update_psw_addr(s);
3973    update_cc_op(s);
3974
3975    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3976    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3977    tcg_temp_free_i32(t);
3978
3979    t = tcg_const_i32(s->next_pc - s->pc);
3980    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3981    tcg_temp_free_i32(t);
3982
3983    gen_exception(EXCP_SVC);
3984    return EXIT_NORETURN;
3985}
3986
3987static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3988{
3989    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
3990    set_cc_static(s);
3991    return NO_EXIT;
3992}
3993
3994static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3995{
3996    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
3997    set_cc_static(s);
3998    return NO_EXIT;
3999}
4000
4001static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
4002{
4003    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4004    set_cc_static(s);
4005    return NO_EXIT;
4006}
4007
4008#ifndef CONFIG_USER_ONLY
4009static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
4010{
4011    potential_page_fault(s);
4012    gen_helper_tprot(cc_op, o->addr1, o->in2);
4013    set_cc_static(s);
4014    return NO_EXIT;
4015}
4016#endif
4017
4018static ExitStatus op_tr(DisasContext *s, DisasOps *o)
4019{
4020    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4021    potential_page_fault(s);
4022    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4023    tcg_temp_free_i32(l);
4024    set_cc_static(s);
4025    return NO_EXIT;
4026}
4027
4028static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4029{
4030    potential_page_fault(s);
4031    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4032    return_low128(o->out2);
4033    set_cc_static(s);
4034    return NO_EXIT;
4035}
4036
4037static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4038{
4039    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4040    potential_page_fault(s);
4041    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4042    tcg_temp_free_i32(l);
4043    set_cc_static(s);
4044    return NO_EXIT;
4045}
4046
4047static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4048{
4049    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4050    potential_page_fault(s);
4051    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4052    tcg_temp_free_i32(l);
4053    return NO_EXIT;
4054}
4055
4056static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4057{
4058    int d1 = get_field(s->fields, d1);
4059    int d2 = get_field(s->fields, d2);
4060    int b1 = get_field(s->fields, b1);
4061    int b2 = get_field(s->fields, b2);
4062    int l = get_field(s->fields, l1);
4063    TCGv_i32 t32;
4064
4065    o->addr1 = get_address(s, 0, b1, d1);
4066
4067    /* If the addresses are identical, this is a store/memset of zero.  */
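        /* The classic assembler idiom "XC 0(L,R1),0(R1)" clears L bytes this
           way.  Short lengths (up to 32 bytes) are expanded inline below;
           longer ones are left to the helper call further down.  */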
4068    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4069        o->in2 = tcg_const_i64(0);
4070
4071        l++;
4072        while (l >= 8) {
4073            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4074            l -= 8;
4075            if (l > 0) {
4076                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4077            }
4078        }
4079        if (l >= 4) {
4080            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4081            l -= 4;
4082            if (l > 0) {
4083                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4084            }
4085        }
4086        if (l >= 2) {
4087            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4088            l -= 2;
4089            if (l > 0) {
4090                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4091            }
4092        }
4093        if (l) {
4094            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4095        }
4096        gen_op_movi_cc(s, 0);
4097        return NO_EXIT;
4098    }
4099
4100    /* But in general we'll defer to a helper.  */
4101    o->in2 = get_address(s, 0, b2, d2);
4102    t32 = tcg_const_i32(l);
4103    potential_page_fault(s);
4104    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4105    tcg_temp_free_i32(t32);
4106    set_cc_static(s);
4107    return NO_EXIT;
4108}
4109
4110static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4111{
4112    tcg_gen_xor_i64(o->out, o->in1, o->in2);
4113    return NO_EXIT;
4114}
4115
4116static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4117{
4118    int shift = s->insn->data & 0xff;
4119    int size = s->insn->data >> 8;
4120    uint64_t mask = ((1ull << size) - 1) << shift;
4121
4122    assert(!o->g_in2);
4123    tcg_gen_shli_i64(o->in2, o->in2, shift);
4124    tcg_gen_xor_i64(o->out, o->in1, o->in2);
4125
4126    /* Produce the CC from only the bits manipulated.  */
4127    tcg_gen_andi_i64(cc_dst, o->out, mask);
4128    set_cc_nz_u64(s, cc_dst);
4129    return NO_EXIT;
4130}
4131
4132static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4133{
4134    o->out = tcg_const_i64(0);
4135    return NO_EXIT;
4136}
4137
4138static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4139{
4140    o->out = tcg_const_i64(0);
4141    o->out2 = o->out;
4142    o->g_out2 = true;
4143    return NO_EXIT;
4144}
4145
4146/* ====================================================================== */
4147/* The "Cc OUTput" generators.  Given the generated output (and in some cases
4148   the original inputs), update the various cc data structures in order to
4149   be able to compute the new condition code.  */
4150
4151static void cout_abs32(DisasContext *s, DisasOps *o)
4152{
4153    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4154}
4155
4156static void cout_abs64(DisasContext *s, DisasOps *o)
4157{
4158    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4159}
4160
4161static void cout_adds32(DisasContext *s, DisasOps *o)
4162{
4163    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4164}
4165
4166static void cout_adds64(DisasContext *s, DisasOps *o)
4167{
4168    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4169}
4170
4171static void cout_addu32(DisasContext *s, DisasOps *o)
4172{
4173    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4174}
4175
4176static void cout_addu64(DisasContext *s, DisasOps *o)
4177{
4178    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4179}
4180
4181static void cout_addc32(DisasContext *s, DisasOps *o)
4182{
4183    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4184}
4185
4186static void cout_addc64(DisasContext *s, DisasOps *o)
4187{
4188    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4189}
4190
4191static void cout_cmps32(DisasContext *s, DisasOps *o)
4192{
4193    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4194}
4195
4196static void cout_cmps64(DisasContext *s, DisasOps *o)
4197{
4198    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4199}
4200
4201static void cout_cmpu32(DisasContext *s, DisasOps *o)
4202{
4203    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4204}
4205
4206static void cout_cmpu64(DisasContext *s, DisasOps *o)
4207{
4208    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4209}
4210
4211static void cout_f32(DisasContext *s, DisasOps *o)
4212{
4213    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4214}
4215
4216static void cout_f64(DisasContext *s, DisasOps *o)
4217{
4218    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4219}
4220
4221static void cout_f128(DisasContext *s, DisasOps *o)
4222{
4223    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4224}
4225
4226static void cout_nabs32(DisasContext *s, DisasOps *o)
4227{
4228    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4229}
4230
4231static void cout_nabs64(DisasContext *s, DisasOps *o)
4232{
4233    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4234}
4235
4236static void cout_neg32(DisasContext *s, DisasOps *o)
4237{
4238    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4239}
4240
4241static void cout_neg64(DisasContext *s, DisasOps *o)
4242{
4243    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4244}
4245
4246static void cout_nz32(DisasContext *s, DisasOps *o)
4247{
4248    tcg_gen_ext32u_i64(cc_dst, o->out);
4249    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4250}
4251
4252static void cout_nz64(DisasContext *s, DisasOps *o)
4253{
4254    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4255}
4256
4257static void cout_s32(DisasContext *s, DisasOps *o)
4258{
4259    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4260}
4261
4262static void cout_s64(DisasContext *s, DisasOps *o)
4263{
4264    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4265}
4266
4267static void cout_subs32(DisasContext *s, DisasOps *o)
4268{
4269    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4270}
4271
4272static void cout_subs64(DisasContext *s, DisasOps *o)
4273{
4274    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4275}
4276
4277static void cout_subu32(DisasContext *s, DisasOps *o)
4278{
4279    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4280}
4281
4282static void cout_subu64(DisasContext *s, DisasOps *o)
4283{
4284    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4285}
4286
4287static void cout_subb32(DisasContext *s, DisasOps *o)
4288{
4289    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4290}
4291
4292static void cout_subb64(DisasContext *s, DisasOps *o)
4293{
4294    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4295}
4296
4297static void cout_tm32(DisasContext *s, DisasOps *o)
4298{
4299    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4300}
4301
4302static void cout_tm64(DisasContext *s, DisasOps *o)
4303{
4304    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4305}
4306
4307/* ====================================================================== */
4308/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
4309   with the TCG register to which we will write.  Used in combination with
4310   the "wout" generators, in some cases we need a new temporary, and in
4311   some cases we can write to a TCG global.  */
4312
4313static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4314{
4315    o->out = tcg_temp_new_i64();
4316}
4317#define SPEC_prep_new 0
4318
4319static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4320{
4321    o->out = tcg_temp_new_i64();
4322    o->out2 = tcg_temp_new_i64();
4323}
4324#define SPEC_prep_new_P 0
4325
4326static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4327{
4328    o->out = regs[get_field(f, r1)];
4329    o->g_out = true;
4330}
4331#define SPEC_prep_r1 0
4332
4333static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4334{
4335    int r1 = get_field(f, r1);
4336    o->out = regs[r1];
4337    o->out2 = regs[r1 + 1];
4338    o->g_out = o->g_out2 = true;
4339}
4340#define SPEC_prep_r1_P SPEC_r1_even
4341
4342static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4343{
4344    o->out = fregs[get_field(f, r1)];
4345    o->g_out = true;
4346}
4347#define SPEC_prep_f1 0
4348
4349static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4350{
4351    int r1 = get_field(f, r1);
4352    o->out = fregs[r1];
4353    o->out2 = fregs[r1 + 2];
4354    o->g_out = o->g_out2 = true;
4355}
4356#define SPEC_prep_x1 SPEC_r1_f128
4357
4358/* ====================================================================== */
4359/* The "Write OUTput" generators.  These generally perform some non-trivial
4360   copy of data to TCG globals, or to main memory.  The trivial cases are
4361   generally handled by having a "prep" generator install the TCG global
4362   as the destination of the operation.  */
4363
4364static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4365{
4366    store_reg(get_field(f, r1), o->out);
4367}
4368#define SPEC_wout_r1 0
4369
4370static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4371{
4372    int r1 = get_field(f, r1);
4373    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4374}
4375#define SPEC_wout_r1_8 0
4376
4377static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4378{
4379    int r1 = get_field(f, r1);
4380    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4381}
4382#define SPEC_wout_r1_16 0
4383
4384static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4385{
4386    store_reg32_i64(get_field(f, r1), o->out);
4387}
4388#define SPEC_wout_r1_32 0
4389
4390static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
4391{
4392    store_reg32h_i64(get_field(f, r1), o->out);
4393}
4394#define SPEC_wout_r1_32h 0
4395
4396static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4397{
4398    int r1 = get_field(f, r1);
4399    store_reg32_i64(r1, o->out);
4400    store_reg32_i64(r1 + 1, o->out2);
4401}
4402#define SPEC_wout_r1_P32 SPEC_r1_even
4403
4404static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4405{
4406    int r1 = get_field(f, r1);
4407    store_reg32_i64(r1 + 1, o->out);
4408    tcg_gen_shri_i64(o->out, o->out, 32);
4409    store_reg32_i64(r1, o->out);
4410}
4411#define SPEC_wout_r1_D32 SPEC_r1_even
4412
4413static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4414{
4415    store_freg32_i64(get_field(f, r1), o->out);
4416}
4417#define SPEC_wout_e1 0
4418
4419static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4420{
4421    store_freg(get_field(f, r1), o->out);
4422}
4423#define SPEC_wout_f1 0
4424
4425static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4426{
4427    int f1 = get_field(s->fields, r1);
4428    store_freg(f1, o->out);
4429    store_freg(f1 + 2, o->out2);
4430}
4431#define SPEC_wout_x1 SPEC_r1_f128
4432
4433static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4434{
4435    if (get_field(f, r1) != get_field(f, r2)) {
4436        store_reg32_i64(get_field(f, r1), o->out);
4437    }
4438}
4439#define SPEC_wout_cond_r1r2_32 0
4440
4441static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4442{
4443    if (get_field(f, r1) != get_field(f, r2)) {
4444        store_freg32_i64(get_field(f, r1), o->out);
4445    }
4446}
4447#define SPEC_wout_cond_e1e2 0
4448
4449static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4450{
4451    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4452}
4453#define SPEC_wout_m1_8 0
4454
4455static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4456{
4457    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4458}
4459#define SPEC_wout_m1_16 0
4460
4461static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4462{
4463    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4464}
4465#define SPEC_wout_m1_32 0
4466
4467static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4468{
4469    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4470}
4471#define SPEC_wout_m1_64 0
4472
4473static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4474{
4475    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4476}
4477#define SPEC_wout_m2_32 0
4478
4479static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4480{
4481    /* XXX release reservation */
4482    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4483    store_reg32_i64(get_field(f, r1), o->in2);
4484}
4485#define SPEC_wout_m2_32_r1_atomic 0
4486
4487static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4488{
4489    /* XXX release reservation */
4490    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4491    store_reg(get_field(f, r1), o->in2);
4492}
4493#define SPEC_wout_m2_64_r1_atomic 0
4494
4495/* ====================================================================== */
4496/* The "INput 1" generators.  These load the first operand to an insn.  */
4497
4498static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4499{
4500    o->in1 = load_reg(get_field(f, r1));
4501}
4502#define SPEC_in1_r1 0
4503
4504static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4505{
4506    o->in1 = regs[get_field(f, r1)];
4507    o->g_in1 = true;
4508}
4509#define SPEC_in1_r1_o 0
4510
4511static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4512{
4513    o->in1 = tcg_temp_new_i64();
4514    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
4515}
4516#define SPEC_in1_r1_32s 0
4517
4518static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4519{
4520    o->in1 = tcg_temp_new_i64();
4521    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4522}
4523#define SPEC_in1_r1_32u 0
4524
4525static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4526{
4527    o->in1 = tcg_temp_new_i64();
4528    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4529}
4530#define SPEC_in1_r1_sr32 0
4531
4532static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4533{
4534    o->in1 = load_reg(get_field(f, r1) + 1);
4535}
4536#define SPEC_in1_r1p1 SPEC_r1_even
4537
4538static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4539{
4540    o->in1 = tcg_temp_new_i64();
4541    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4542}
4543#define SPEC_in1_r1p1_32s SPEC_r1_even
4544
4545static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4546{
4547    o->in1 = tcg_temp_new_i64();
4548    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4549}
4550#define SPEC_in1_r1p1_32u SPEC_r1_even
4551
4552static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4553{
4554    int r1 = get_field(f, r1);
4555    o->in1 = tcg_temp_new_i64();
4556    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4557}
4558#define SPEC_in1_r1_D32 SPEC_r1_even
4559
4560static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4561{
4562    o->in1 = load_reg(get_field(f, r2));
4563}
4564#define SPEC_in1_r2 0
4565
4566static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4567{
4568    o->in1 = tcg_temp_new_i64();
4569    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
4570}
4571#define SPEC_in1_r2_sr32 0
4572
4573static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4574{
4575    o->in1 = load_reg(get_field(f, r3));
4576}
4577#define SPEC_in1_r3 0
4578
4579static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4580{
4581    o->in1 = regs[get_field(f, r3)];
4582    o->g_in1 = true;
4583}
4584#define SPEC_in1_r3_o 0
4585
4586static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4587{
4588    o->in1 = tcg_temp_new_i64();
4589    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4590}
4591#define SPEC_in1_r3_32s 0
4592
4593static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4594{
4595    o->in1 = tcg_temp_new_i64();
4596    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4597}
4598#define SPEC_in1_r3_32u 0
4599
4600static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4601{
4602    int r3 = get_field(f, r3);
4603    o->in1 = tcg_temp_new_i64();
4604    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4605}
4606#define SPEC_in1_r3_D32 SPEC_r3_even
4607
4608static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4609{
4610    o->in1 = load_freg32_i64(get_field(f, r1));
4611}
4612#define SPEC_in1_e1 0
4613
4614static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4615{
4616    o->in1 = fregs[get_field(f, r1)];
4617    o->g_in1 = true;
4618}
4619#define SPEC_in1_f1_o 0
4620
4621static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4622{
4623    int r1 = get_field(f, r1);
4624    o->out = fregs[r1];
4625    o->out2 = fregs[r1 + 2];
4626    o->g_out = o->g_out2 = true;
4627}
4628#define SPEC_in1_x1_o SPEC_r1_f128
4629
4630static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4631{
4632    o->in1 = fregs[get_field(f, r3)];
4633    o->g_in1 = true;
4634}
4635#define SPEC_in1_f3_o 0
4636
4637static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4638{
4639    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4640}
4641#define SPEC_in1_la1 0
4642
4643static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4644{
4645    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4646    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4647}
4648#define SPEC_in1_la2 0
4649
4650static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4651{
4652    in1_la1(s, f, o);
4653    o->in1 = tcg_temp_new_i64();
4654    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4655}
4656#define SPEC_in1_m1_8u 0
4657
4658static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4659{
4660    in1_la1(s, f, o);
4661    o->in1 = tcg_temp_new_i64();
4662    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4663}
4664#define SPEC_in1_m1_16s 0
4665
4666static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4667{
4668    in1_la1(s, f, o);
4669    o->in1 = tcg_temp_new_i64();
4670    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4671}
4672#define SPEC_in1_m1_16u 0
4673
4674static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4675{
4676    in1_la1(s, f, o);
4677    o->in1 = tcg_temp_new_i64();
4678    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4679}
4680#define SPEC_in1_m1_32s 0
4681
4682static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4683{
4684    in1_la1(s, f, o);
4685    o->in1 = tcg_temp_new_i64();
4686    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4687}
4688#define SPEC_in1_m1_32u 0
4689
4690static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4691{
4692    in1_la1(s, f, o);
4693    o->in1 = tcg_temp_new_i64();
4694    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4695}
4696#define SPEC_in1_m1_64 0
4697
4698/* ====================================================================== */
4699/* The "INput 2" generators.  These load the second operand to an insn.  */
4700
4701static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4702{
4703    o->in2 = regs[get_field(f, r1)];
4704    o->g_in2 = true;
4705}
4706#define SPEC_in2_r1_o 0
4707
4708static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4709{
4710    o->in2 = tcg_temp_new_i64();
4711    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4712}
4713#define SPEC_in2_r1_16u 0
4714
4715static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4716{
4717    o->in2 = tcg_temp_new_i64();
4718    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4719}
4720#define SPEC_in2_r1_32u 0
4721
4722static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4723{
4724    int r1 = get_field(f, r1);
4725    o->in2 = tcg_temp_new_i64();
4726    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4727}
4728#define SPEC_in2_r1_D32 SPEC_r1_even
4729
4730static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4731{
4732    o->in2 = load_reg(get_field(f, r2));
4733}
4734#define SPEC_in2_r2 0
4735
4736static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4737{
4738    o->in2 = regs[get_field(f, r2)];
4739    o->g_in2 = true;
4740}
4741#define SPEC_in2_r2_o 0
4742
4743static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4744{
4745    int r2 = get_field(f, r2);
4746    if (r2 != 0) {
4747        o->in2 = load_reg(r2);
4748    }
4749}
4750#define SPEC_in2_r2_nz 0
4751
4752static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4753{
4754    o->in2 = tcg_temp_new_i64();
4755    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4756}
4757#define SPEC_in2_r2_8s 0
4758
4759static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4760{
4761    o->in2 = tcg_temp_new_i64();
4762    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4763}
4764#define SPEC_in2_r2_8u 0
4765
4766static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4767{
4768    o->in2 = tcg_temp_new_i64();
4769    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4770}
4771#define SPEC_in2_r2_16s 0
4772
4773static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4774{
4775    o->in2 = tcg_temp_new_i64();
4776    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4777}
4778#define SPEC_in2_r2_16u 0
4779
4780static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4781{
4782    o->in2 = load_reg(get_field(f, r3));
4783}
4784#define SPEC_in2_r3 0
4785
4786static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4787{
4788    o->in2 = tcg_temp_new_i64();
4789    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
4790}
4791#define SPEC_in2_r3_sr32 0
4792
4793static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4794{
4795    o->in2 = tcg_temp_new_i64();
4796    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4797}
4798#define SPEC_in2_r2_32s 0
4799
4800static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4801{
4802    o->in2 = tcg_temp_new_i64();
4803    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4804}
4805#define SPEC_in2_r2_32u 0
4806
4807static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4808{
4809    o->in2 = tcg_temp_new_i64();
4810    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
4811}
4812#define SPEC_in2_r2_sr32 0
4813
4814static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4815{
4816    o->in2 = load_freg32_i64(get_field(f, r2));
4817}
4818#define SPEC_in2_e2 0
4819
4820static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4821{
4822    o->in2 = fregs[get_field(f, r2)];
4823    o->g_in2 = true;
4824}
4825#define SPEC_in2_f2_o 0
4826
4827static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4828{
4829    int r2 = get_field(f, r2);
4830    o->in1 = fregs[r2];
4831    o->in2 = fregs[r2 + 2];
4832    o->g_in1 = o->g_in2 = true;
4833}
4834#define SPEC_in2_x2_o SPEC_r2_f128
4835
4836static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4837{
4838    o->in2 = get_address(s, 0, get_field(f, r2), 0);
4839}
4840#define SPEC_in2_ra2 0
4841
4842static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4843{
4844    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4845    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4846}
4847#define SPEC_in2_a2 0
4848
4849static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4850{
4851    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4852}
4853#define SPEC_in2_ri2 0
4854
4855static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4856{
4857    help_l2_shift(s, f, o, 31);
4858}
4859#define SPEC_in2_sh32 0
4860
4861static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4862{
4863    help_l2_shift(s, f, o, 63);
4864}
4865#define SPEC_in2_sh64 0
4866
4867static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4868{
4869    in2_a2(s, f, o);
4870    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4871}
4872#define SPEC_in2_m2_8u 0
4873
4874static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4875{
4876    in2_a2(s, f, o);
4877    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4878}
4879#define SPEC_in2_m2_16s 0
4880
4881static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4882{
4883    in2_a2(s, f, o);
4884    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4885}
4886#define SPEC_in2_m2_16u 0
4887
4888static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4889{
4890    in2_a2(s, f, o);
4891    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4892}
4893#define SPEC_in2_m2_32s 0
4894
4895static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4896{
4897    in2_a2(s, f, o);
4898    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4899}
4900#define SPEC_in2_m2_32u 0
4901
4902static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4903{
4904    in2_a2(s, f, o);
4905    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4906}
4907#define SPEC_in2_m2_64 0
4908
4909static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4910{
4911    in2_ri2(s, f, o);
4912    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4913}
4914#define SPEC_in2_mri2_16u 0
4915
4916static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4917{
4918    in2_ri2(s, f, o);
4919    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4920}
4921#define SPEC_in2_mri2_32s 0
4922
4923static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4924{
4925    in2_ri2(s, f, o);
4926    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4927}
4928#define SPEC_in2_mri2_32u 0
4929
4930static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4931{
4932    in2_ri2(s, f, o);
4933    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4934}
4935#define SPEC_in2_mri2_64 0
4936
4937static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4938{
4939    /* XXX should reserve the address */
4940    in1_la2(s, f, o);
4941    o->in2 = tcg_temp_new_i64();
4942    tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
4943}
4944#define SPEC_in2_m2_32s_atomic 0
4945
4946static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4947{
4948    /* XXX should reserve the address */
4949    in1_la2(s, f, o);
4950    o->in2 = tcg_temp_new_i64();
4951    tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
4952}
4953#define SPEC_in2_m2_64_atomic 0
4954
4955static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4956{
4957    o->in2 = tcg_const_i64(get_field(f, i2));
4958}
4959#define SPEC_in2_i2 0
4960
4961static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4962{
4963    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4964}
4965#define SPEC_in2_i2_8u 0
4966
4967static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4968{
4969    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4970}
4971#define SPEC_in2_i2_16u 0
4972
4973static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4974{
4975    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4976}
4977#define SPEC_in2_i2_32u 0
4978
4979static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4980{
4981    uint64_t i2 = (uint16_t)get_field(f, i2);
4982    o->in2 = tcg_const_i64(i2 << s->insn->data);
4983}
4984#define SPEC_in2_i2_16u_shl 0
4985
4986static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4987{
4988    uint64_t i2 = (uint32_t)get_field(f, i2);
4989    o->in2 = tcg_const_i64(i2 << s->insn->data);
4990}
4991#define SPEC_in2_i2_32u_shl 0
4992
4993#ifndef CONFIG_USER_ONLY
4994static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
4995{
4996    o->in2 = tcg_const_i64(s->fields->raw_insn);
4997}
4998#define SPEC_in2_insn 0
4999#endif
5000
5001/* ====================================================================== */
5002
5003/* Find opc within the table of insns.  This is formulated as a switch
5004   statement so that (1) we get compile-time notice of cut-paste errors
5005   for duplicated opcodes, and (2) the compiler generates the binary
5006   search tree, rather than us having to post-process the table.  */
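    /* As a rough sketch of how this fits together: an insn-data.def entry
       along the lines of
           C(0x1a00, AR, RR_a, Z, r1, r2_32s, new, r1_32, add, adds32)
       (the authoritative spelling lives in insn-data.def) expands once into
       the enum constant insn_AR, once into an insn_info[] element that wires
       up in1_r1, in2_r2_32s, prep_new, wout_r1_32, op_add and cout_adds32,
       and once into "case 0x1a00: return &insn_info[insn_AR];" inside
       lookup_opc below.  */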
5007
5008#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5009    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5010
5011#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5012
5013enum DisasInsnEnum {
5014#include "insn-data.def"
5015};
5016
5017#undef D
5018#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
5019    .opc = OPC,                                                             \
5020    .fmt = FMT_##FT,                                                        \
5021    .fac = FAC_##FC,                                                        \
5022    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
5023    .name = #NM,                                                            \
5024    .help_in1 = in1_##I1,                                                   \
5025    .help_in2 = in2_##I2,                                                   \
5026    .help_prep = prep_##P,                                                  \
5027    .help_wout = wout_##W,                                                  \
5028    .help_cout = cout_##CC,                                                 \
5029    .help_op = op_##OP,                                                     \
5030    .data = D                                                               \
5031 },
5032
5033/* Allow 0 to be used for NULL in the table below.  */
5034#define in1_0  NULL
5035#define in2_0  NULL
5036#define prep_0  NULL
5037#define wout_0  NULL
5038#define cout_0  NULL
5039#define op_0  NULL
5040
5041#define SPEC_in1_0 0
5042#define SPEC_in2_0 0
5043#define SPEC_prep_0 0
5044#define SPEC_wout_0 0
5045
5046static const DisasInsn insn_info[] = {
5047#include "insn-data.def"
5048};
5049
5050#undef D
5051#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5052    case OPC: return &insn_info[insn_ ## NM];
5053
5054static const DisasInsn *lookup_opc(uint16_t opc)
5055{
5056    switch (opc) {
5057#include "insn-data.def"
5058    default:
5059        return NULL;
5060    }
5061}
5062
5063#undef D
5064#undef C
5065
5066/* Extract a field from the insn.  The INSN should be left-aligned in
5067   the uint64_t so that we can more easily utilize the big-bit-endian
5068   definitions we extract from the Principles of Operation.  */
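    /* For example, with the insn left-aligned this way, the classic
       RR-format R1 field occupying instruction bits 8-11 would be described
       as beg = 8, size = 4, and "(insn << 8) >> 60" below leaves exactly
       those four bits as the register number.  (Illustrative only; the real
       field descriptions come from the format tables.)  */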
5069
5070static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5071{
5072    uint32_t r, m;
5073
5074    if (f->size == 0) {
5075        return;
5076    }
5077
5078    /* Zero extract the field from the insn.  */
5079    r = (insn << f->beg) >> (64 - f->size);
5080
5081    /* Sign-extend, or un-swap the field as necessary.  */
5082    switch (f->type) {
5083    case 0: /* unsigned */
5084        break;
5085    case 1: /* signed */
5086        assert(f->size <= 32);
5087        m = 1u << (f->size - 1);
5088        r = (r ^ m) - m;
5089        break;
5090    case 2: /* dl+dh split, signed 20 bit. */
5091        r = ((int8_t)r << 12) | (r >> 8);
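            /* Assuming the format tables pack the contiguous DL||DH bits
               as (dl << 8) | dh, this re-forms the signed 20-bit value:
               sign-extended DH in bits 19-12, DL in bits 11-0.  */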
5092        break;
5093    default:
5094        abort();
5095    }
5096
5097    /* Validate that the "compressed" encoding we selected above is valid.
5098       I.e. we haven't made two different original fields overlap.  */
5099    assert(((o->presentC >> f->indexC) & 1) == 0);
5100    o->presentC |= 1 << f->indexC;
5101    o->presentO |= 1 << f->indexO;
5102
5103    o->c[f->indexC] = r;
5104}
5105
5106/* Lookup the insn at the current PC, extracting the operands into O and
5107   returning the info struct for the insn.  Returns NULL for invalid insn.  */
5108
5109static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
5110                                     DisasFields *f)
5111{
5112    uint64_t insn, pc = s->pc;
5113    int op, op2, ilen;
5114    const DisasInsn *info;
5115
5116    insn = ld_code2(env, pc);
5117    op = (insn >> 8) & 0xff;
5118    ilen = get_ilen(op);
5119    s->next_pc = s->pc + ilen;
5120
5121    switch (ilen) {
5122    case 2:
5123        insn = insn << 48;
5124        break;
5125    case 4:
5126        insn = ld_code4(env, pc) << 32;
5127        break;
5128    case 6:
5129        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
5130        break;
5131    default:
5132        abort();
5133    }
5134
5135    /* We can't actually determine the insn format until we've looked up
5136       the full insn opcode.  Which we can't do without locating the
5137       secondary opcode.  Assume by default that OP2 is at bit 40; for
5138       those smaller insns that don't actually have a secondary opcode
5139       this will correctly result in OP2 = 0. */
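    /* Worked example, assuming the usual encoding of AGR as 0xb908 (RRE):
       the left-aligned insn looks like 0xb90800xy00000000 with x/y the
       register fields, op is 0xb9, the "(insn << 8) >> 56" case below
       yields op2 = 0x08, and lookup_opc is then called with 0xb908.  */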
5140    switch (op) {
5141    case 0x01: /* E */
5142    case 0x80: /* S */
5143    case 0x82: /* S */
5144    case 0x93: /* S */
5145    case 0xb2: /* S, RRF, RRE */
5146    case 0xb3: /* RRE, RRD, RRF */
5147    case 0xb9: /* RRE, RRF */
5148    case 0xe5: /* SSE, SIL */
5149        op2 = (insn << 8) >> 56;
5150        break;
5151    case 0xa5: /* RI */
5152    case 0xa7: /* RI */
5153    case 0xc0: /* RIL */
5154    case 0xc2: /* RIL */
5155    case 0xc4: /* RIL */
5156    case 0xc6: /* RIL */
5157    case 0xc8: /* SSF */
5158    case 0xcc: /* RIL */
5159        op2 = (insn << 12) >> 60;
5160        break;
5161    case 0xd0 ... 0xdf: /* SS */
5162    case 0xe1: /* SS */
5163    case 0xe2: /* SS */
5164    case 0xe8: /* SS */
5165    case 0xe9: /* SS */
5166    case 0xea: /* SS */
5167    case 0xee ... 0xf3: /* SS */
5168    case 0xf8 ... 0xfd: /* SS */
5169        op2 = 0;
5170        break;
5171    default:
5172        op2 = (insn << 40) >> 56;
5173        break;
5174    }
5175
5176    memset(f, 0, sizeof(*f));
5177    f->raw_insn = insn;
5178    f->op = op;
5179    f->op2 = op2;
5180
5181    /* Lookup the instruction.  */
5182    info = lookup_opc(op << 8 | op2);
5183
5184    /* If we found it, extract the operands.  */
5185    if (info != NULL) {
5186        DisasFormat fmt = info->fmt;
5187        int i;
5188
5189        for (i = 0; i < NUM_C_FIELD; ++i) {
5190            extract_field(f, &format_info[fmt].op[i], insn);
5191        }
5192    }
5193    return info;
5194}
5195
5196static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
5197{
5198    const DisasInsn *insn;
5199    ExitStatus ret = NO_EXIT;
5200    DisasFields f;
5201    DisasOps o;
5202
5203    /* Search for the insn in the table.  */
5204    insn = extract_insn(env, s, &f);
5205
5206    /* Not found means unimplemented/illegal opcode.  */
5207    if (insn == NULL) {
5208        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
5209                      f.op, f.op2);
5210        gen_illegal_opcode(s);
5211        return EXIT_NORETURN;
5212    }
5213
5214#ifndef CONFIG_USER_ONLY
5215    if (s->tb->flags & FLAG_MASK_PER) {
5216        TCGv_i64 addr = tcg_const_i64(s->pc);
5217        gen_helper_per_ifetch(cpu_env, addr);
5218        tcg_temp_free_i64(addr);
5219    }
5220#endif
5221
5222    /* Check for insn specification exceptions.  */
5223    if (insn->spec) {
5224        int spec = insn->spec, excp = 0, r;
5225
5226        if (spec & SPEC_r1_even) {
5227            r = get_field(&f, r1);
5228            if (r & 1) {
5229                excp = PGM_SPECIFICATION;
5230            }
5231        }
5232        if (spec & SPEC_r2_even) {
5233            r = get_field(&f, r2);
5234            if (r & 1) {
5235                excp = PGM_SPECIFICATION;
5236            }
5237        }
5238        if (spec & SPEC_r3_even) {
5239            r = get_field(&f, r3);
5240            if (r & 1) {
5241                excp = PGM_SPECIFICATION;
5242            }
5243        }
5244        if (spec & SPEC_r1_f128) {
5245            r = get_field(&f, r1);
5246            if (r > 13) {
5247                excp = PGM_SPECIFICATION;
5248            }
5249        }
5250        if (spec & SPEC_r2_f128) {
5251            r = get_field(&f, r2);
5252            if (r > 13) {
5253                excp = PGM_SPECIFICATION;
5254            }
5255        }
5256        if (excp) {
5257            gen_program_exception(s, excp);
5258            return EXIT_NORETURN;
5259        }
5260    }
5261
5262    /* Set up the structures we use to communicate with the helpers. */
5263    s->insn = insn;
5264    s->fields = &f;
5265    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
5266    TCGV_UNUSED_I64(o.out);
5267    TCGV_UNUSED_I64(o.out2);
5268    TCGV_UNUSED_I64(o.in1);
5269    TCGV_UNUSED_I64(o.in2);
5270    TCGV_UNUSED_I64(o.addr1);
5271
5272    /* Implement the instruction.  */
5273    if (insn->help_in1) {
5274        insn->help_in1(s, &f, &o);
5275    }
5276    if (insn->help_in2) {
5277        insn->help_in2(s, &f, &o);
5278    }
5279    if (insn->help_prep) {
5280        insn->help_prep(s, &f, &o);
5281    }
5282    if (insn->help_op) {
5283        ret = insn->help_op(s, &o);
5284    }
5285    if (insn->help_wout) {
5286        insn->help_wout(s, &f, &o);
5287    }
5288    if (insn->help_cout) {
5289        insn->help_cout(s, &o);
5290    }
5291
5292    /* Free any temporaries created by the helpers.  */
5293    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
5294        tcg_temp_free_i64(o.out);
5295    }
5296    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
5297        tcg_temp_free_i64(o.out2);
5298    }
5299    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
5300        tcg_temp_free_i64(o.in1);
5301    }
5302    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
5303        tcg_temp_free_i64(o.in2);
5304    }
5305    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
5306        tcg_temp_free_i64(o.addr1);
5307    }
5308
5309#ifndef CONFIG_USER_ONLY
5310    if (s->tb->flags & FLAG_MASK_PER) {
5311        /* An exception might be triggered; save the PSW if not already done.  */
5312        if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
5313            tcg_gen_movi_i64(psw_addr, s->next_pc);
5314        }
5315
5316        /* Save off cc.  */
5317        update_cc_op(s);
5318
5319        /* Call the helper to check for a possible PER exception.  */
5320        gen_helper_per_check_exception(cpu_env);
5321    }
5322#endif
5323
5324    /* Advance to the next instruction.  */
5325    s->pc = s->next_pc;
5326    return ret;
5327}
5328
5329void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
5330{
5331    S390CPU *cpu = s390_env_get_cpu(env);
5332    CPUState *cs = CPU(cpu);
5333    DisasContext dc;
5334    target_ulong pc_start;
5335    uint64_t next_page_start;
5336    int num_insns, max_insns;
5337    ExitStatus status;
5338    bool do_debug;
5339
5340    pc_start = tb->pc;
5341
5342    /* 31-bit mode */
5343    if (!(tb->flags & FLAG_MASK_64)) {
5344        pc_start &= 0x7fffffff;
5345    }
5346
5347    dc.tb = tb;
5348    dc.pc = pc_start;
5349    dc.cc_op = CC_OP_DYNAMIC;
5350    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
5351
5352    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
5353
5354    num_insns = 0;
5355    max_insns = tb->cflags & CF_COUNT_MASK;
5356    if (max_insns == 0) {
5357        max_insns = CF_COUNT_MASK;
5358    }
5359    if (max_insns > TCG_MAX_INSNS) {
5360        max_insns = TCG_MAX_INSNS;
5361    }
5362
5363    gen_tb_start(tb);
5364
5365    do {
5366        tcg_gen_insn_start(dc.pc, dc.cc_op);
5367        num_insns++;
5368
5369        if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
5370            status = EXIT_PC_STALE;
5371            do_debug = true;
5372            /* The address covered by the breakpoint must be included in
5373               [tb->pc, tb->pc + tb->size) in order for it to be
5374               properly cleared -- thus we increment the PC here so that
5375               the logic setting tb->size below does the right thing.  */
5376            dc.pc += 2;
5377            break;
5378        }
5379
5380        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5381            gen_io_start();
5382        }
5383
5384        status = NO_EXIT;
5385        if (status == NO_EXIT) {
5386            status = translate_one(env, &dc);
5387        }
5388
5389        /* If we reach a page boundary, are single stepping,
5390           or exhaust instruction count, stop generation.  */
5391        if (status == NO_EXIT
5392            && (dc.pc >= next_page_start
5393                || tcg_op_buf_full()
5394                || num_insns >= max_insns
5395                || singlestep
5396                || cs->singlestep_enabled)) {
5397            status = EXIT_PC_STALE;
5398        }
5399    } while (status == NO_EXIT);
5400
5401    if (tb->cflags & CF_LAST_IO) {
5402        gen_io_end();
5403    }
5404
5405    switch (status) {
5406    case EXIT_GOTO_TB:
5407    case EXIT_NORETURN:
5408        break;
5409    case EXIT_PC_STALE:
5410        update_psw_addr(&dc);
5411        /* FALLTHRU */
5412    case EXIT_PC_UPDATED:
5413        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5414           cc op type is in env */
5415        update_cc_op(&dc);
5416        /* Exit the TB, either by raising a debug exception or by return.  */
5417        if (do_debug) {
5418            gen_exception(EXCP_DEBUG);
5419        } else {
5420            tcg_gen_exit_tb(0);
5421        }
5422        break;
5423    default:
5424        abort();
5425    }
5426
5427    gen_tb_end(tb, num_insns);
5428
5429    tb->size = dc.pc - pc_start;
5430    tb->icount = num_insns;
5431
5432#if defined(S390X_DEBUG_DISAS)
5433    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5434        && qemu_log_in_addr_range(pc_start)) {
5435        qemu_log("IN: %s\n", lookup_symbol(pc_start));
5436        log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
5437        qemu_log("\n");
5438    }
5439#endif
5440}
5441
5442void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
5443                          target_ulong *data)
5444{
5445    int cc_op = data[1];
5446    env->psw.addr = data[0];
5447    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5448        env->cc_op = cc_op;
5449    }
5450}
5451