qemu/target-s390x/translate.c
   1/*
   2 *  S/390 translation
   3 *
   4 *  Copyright (c) 2009 Ulrich Hecht
   5 *  Copyright (c) 2010 Alexander Graf
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * Lesser General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21/* #define DEBUG_INLINE_BRANCHES */
  22#define S390X_DEBUG_DISAS
  23/* #define S390X_DEBUG_DISAS_VERBOSE */
  24
  25#ifdef S390X_DEBUG_DISAS_VERBOSE
  26#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
  27#else
  28#  define LOG_DISAS(...) do { } while (0)
  29#endif
  30
  31#include "cpu.h"
  32#include "disas/disas.h"
  33#include "tcg-op.h"
  34#include "qemu/log.h"
  35#include "qemu/host-utils.h"
  36
  37/* global register indexes */
  38static TCGv_ptr cpu_env;
  39
  40#include "exec/gen-icount.h"
  41#include "helper.h"
  42#define GEN_HELPER 1
  43#include "helper.h"
  44
  45
  46/* Information that (most) every instruction needs to manipulate.  */
  47typedef struct DisasContext DisasContext;
  48typedef struct DisasInsn DisasInsn;
  49typedef struct DisasFields DisasFields;
  50
  51struct DisasContext {
  52    struct TranslationBlock *tb;
  53    const DisasInsn *insn;
  54    DisasFields *fields;
  55    uint64_t pc, next_pc;
  56    enum cc_op cc_op;
  57    bool singlestep_enabled;
  58};
  59
  60/* Information carried about a condition to be evaluated.  */
  61typedef struct {
  62    TCGCond cond:8;
  63    bool is_64;
  64    bool g1;
  65    bool g2;
  66    union {
  67        struct { TCGv_i64 a, b; } s64;
  68        struct { TCGv_i32 a, b; } s32;
  69    } u;
  70} DisasCompare;
  71
  72#define DISAS_EXCP 4
  73
  74#ifdef DEBUG_INLINE_BRANCHES
  75static uint64_t inline_branch_hit[CC_OP_MAX];
  76static uint64_t inline_branch_miss[CC_OP_MAX];
  77#endif
  78
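    /* Compute the value stored as a branch link address: in 31-bit mode the
       top bit of the 32-bit address is set to record the addressing mode.  */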
  79static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
  80{
  81    if (!(s->tb->flags & FLAG_MASK_64)) {
  82        if (s->tb->flags & FLAG_MASK_32) {
  83            return pc | 0x80000000;
  84        }
  85    }
  86    return pc;
  87}
  88
  89void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
  90                    int flags)
  91{
  92    int i;
  93
  94    if (env->cc_op > 3) {
  95        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
  96                    env->psw.mask, env->psw.addr, cc_name(env->cc_op));
  97    } else {
  98        cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
  99                    env->psw.mask, env->psw.addr, env->cc_op);
 100    }
 101
 102    for (i = 0; i < 16; i++) {
 103        cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
 104        if ((i % 4) == 3) {
 105            cpu_fprintf(f, "\n");
 106        } else {
 107            cpu_fprintf(f, " ");
 108        }
 109    }
 110
 111    for (i = 0; i < 16; i++) {
 112        cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
 113        if ((i % 4) == 3) {
 114            cpu_fprintf(f, "\n");
 115        } else {
 116            cpu_fprintf(f, " ");
 117        }
 118    }
 119
 120#ifndef CONFIG_USER_ONLY
 121    for (i = 0; i < 16; i++) {
 122        cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
 123        if ((i % 4) == 3) {
 124            cpu_fprintf(f, "\n");
 125        } else {
 126            cpu_fprintf(f, " ");
 127        }
 128    }
 129#endif
 130
 131#ifdef DEBUG_INLINE_BRANCHES
 132    for (i = 0; i < CC_OP_MAX; i++) {
 133        cpu_fprintf(f, "  %15s = %10ld\t%10ld\n", cc_name(i),
 134                    inline_branch_miss[i], inline_branch_hit[i]);
 135    }
 136#endif
 137
 138    cpu_fprintf(f, "\n");
 139}
 140
 141static TCGv_i64 psw_addr;
 142static TCGv_i64 psw_mask;
 143
 144static TCGv_i32 cc_op;
 145static TCGv_i64 cc_src;
 146static TCGv_i64 cc_dst;
 147static TCGv_i64 cc_vr;
 148
 149static char cpu_reg_names[32][4];
 150static TCGv_i64 regs[16];
 151static TCGv_i64 fregs[16];
 152
 153static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
 154
 155void s390x_translate_init(void)
 156{
 157    int i;
 158
 159    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
 160    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
 161                                      offsetof(CPUS390XState, psw.addr),
 162                                      "psw_addr");
 163    psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
 164                                      offsetof(CPUS390XState, psw.mask),
 165                                      "psw_mask");
 166
 167    cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
 168                                   "cc_op");
 169    cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
 170                                    "cc_src");
 171    cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
 172                                    "cc_dst");
 173    cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
 174                                   "cc_vr");
 175
 176    for (i = 0; i < 16; i++) {
 177        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
 178        regs[i] = tcg_global_mem_new(TCG_AREG0,
 179                                     offsetof(CPUS390XState, regs[i]),
 180                                     cpu_reg_names[i]);
 181    }
 182
 183    for (i = 0; i < 16; i++) {
 184        snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
 185        fregs[i] = tcg_global_mem_new(TCG_AREG0,
 186                                      offsetof(CPUS390XState, fregs[i].d),
 187                                      cpu_reg_names[i + 16]);
 188    }
 189
 190    /* register helpers */
 191#define GEN_HELPER 2
 192#include "helper.h"
 193}
 194
 195static TCGv_i64 load_reg(int reg)
 196{
 197    TCGv_i64 r = tcg_temp_new_i64();
 198    tcg_gen_mov_i64(r, regs[reg]);
 199    return r;
 200}
 201
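    /* Return the 32-bit float in FPR REG, which lives in the high half of
       the 64-bit register.  */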
 202static TCGv_i64 load_freg32_i64(int reg)
 203{
 204    TCGv_i64 r = tcg_temp_new_i64();
 205    tcg_gen_shri_i64(r, fregs[reg], 32);
 206    return r;
 207}
 208
 209static void store_reg(int reg, TCGv_i64 v)
 210{
 211    tcg_gen_mov_i64(regs[reg], v);
 212}
 213
 214static void store_freg(int reg, TCGv_i64 v)
 215{
 216    tcg_gen_mov_i64(fregs[reg], v);
 217}
 218
 219static void store_reg32_i64(int reg, TCGv_i64 v)
 220{
 221    /* 32 bit register writes keep the upper half */
 222    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
 223}
 224
 225static void store_reg32h_i64(int reg, TCGv_i64 v)
 226{
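        /* 32 bit register writes to the high half keep the lower half */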
 227    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
 228}
 229
 230static void store_freg32_i64(int reg, TCGv_i64 v)
 231{
 232    tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
 233}
 234
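    /* Fetch the low doubleword of a 128-bit helper result, which helpers
       leave in env->retxl.  */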
 235static void return_low128(TCGv_i64 dest)
 236{
 237    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
 238}
 239
 240static void update_psw_addr(DisasContext *s)
 241{
 242    /* psw.addr */
 243    tcg_gen_movi_i64(psw_addr, s->pc);
 244}
 245
 246static void update_cc_op(DisasContext *s)
 247{
 248    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
 249        tcg_gen_movi_i32(cc_op, s->cc_op);
 250    }
 251}
 252
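    /* Make sure psw.addr and cc_op in env are current before emitting an
       operation that may fault, so the exception path sees correct state.  */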
 253static void potential_page_fault(DisasContext *s)
 254{
 255    update_psw_addr(s);
 256    update_cc_op(s);
 257}
 258
 259static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
 260{
 261    return (uint64_t)cpu_lduw_code(env, pc);
 262}
 263
 264static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
 265{
 266    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
 267}
 268
 269static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
 270{
 271    return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
 272}
 273
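    /* Map the PSW address-space-control bits to a QEMU MMU index.  */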
 274static int get_mem_index(DisasContext *s)
 275{
 276    switch (s->tb->flags & FLAG_MASK_ASC) {
 277    case PSW_ASC_PRIMARY >> 32:
 278        return 0;
 279    case PSW_ASC_SECONDARY >> 32:
 280        return 1;
 281    case PSW_ASC_HOME >> 32:
 282        return 2;
 283    default:
 284        tcg_abort();
 285        break;
 286    }
 287}
 288
 289static void gen_exception(int excp)
 290{
 291    TCGv_i32 tmp = tcg_const_i32(excp);
 292    gen_helper_exception(cpu_env, tmp);
 293    tcg_temp_free_i32(tmp);
 294}
 295
 296static void gen_program_exception(DisasContext *s, int code)
 297{
 298    TCGv_i32 tmp;
 299
 300    /* Remember what pgm exception this was.  */
 301    tmp = tcg_const_i32(code);
 302    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
 303    tcg_temp_free_i32(tmp);
 304
 305    tmp = tcg_const_i32(s->next_pc - s->pc);
 306    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
 307    tcg_temp_free_i32(tmp);
 308
 309    /* Advance past instruction.  */
 310    s->pc = s->next_pc;
 311    update_psw_addr(s);
 312
 313    /* Save off cc.  */
 314    update_cc_op(s);
 315
 316    /* Trigger exception.  */
 317    gen_exception(EXCP_PGM);
 318}
 319
 320static inline void gen_illegal_opcode(DisasContext *s)
 321{
 322    gen_program_exception(s, PGM_SPECIFICATION);
 323}
 324
 325static inline void check_privileged(DisasContext *s)
 326{
 327    if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
 328        gen_program_exception(s, PGM_PRIVILEGED);
 329    }
 330}
 331
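    /* Compute the effective address X2 + B2 + D2, truncated to 31 bits
       when not in 64-bit addressing mode.  */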
 332static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
 333{
 334    TCGv_i64 tmp = tcg_temp_new_i64();
 335    bool need_31 = !(s->tb->flags & FLAG_MASK_64);
 336
 337    /* Note that d2 is limited to 20 bits, signed.  If we crop negative
 338       displacements early we create larger immediate addends.  */
 339
 340    /* Note that addi optimizes the imm==0 case.  */
 341    if (b2 && x2) {
 342        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
 343        tcg_gen_addi_i64(tmp, tmp, d2);
 344    } else if (b2) {
 345        tcg_gen_addi_i64(tmp, regs[b2], d2);
 346    } else if (x2) {
 347        tcg_gen_addi_i64(tmp, regs[x2], d2);
 348    } else {
 349        if (need_31) {
 350            d2 &= 0x7fffffff;
 351            need_31 = false;
 352        }
 353        tcg_gen_movi_i64(tmp, d2);
 354    }
 355    if (need_31) {
 356        tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
 357    }
 358
 359    return tmp;
 360}
 361
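    /* Return true if cc_src/cc_dst/cc_vr hold data for the current cc_op,
       i.e. cc_op names a computed operation rather than a constant, static
       or dynamic condition code.  */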
 362static inline bool live_cc_data(DisasContext *s)
 363{
 364    return (s->cc_op != CC_OP_DYNAMIC
 365            && s->cc_op != CC_OP_STATIC
 366            && s->cc_op > 3);
 367}
 368
 369static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
 370{
 371    if (live_cc_data(s)) {
 372        tcg_gen_discard_i64(cc_src);
 373        tcg_gen_discard_i64(cc_dst);
 374        tcg_gen_discard_i64(cc_vr);
 375    }
 376    s->cc_op = CC_OP_CONST0 + val;
 377}
 378
 379static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
 380{
 381    if (live_cc_data(s)) {
 382        tcg_gen_discard_i64(cc_src);
 383        tcg_gen_discard_i64(cc_vr);
 384    }
 385    tcg_gen_mov_i64(cc_dst, dst);
 386    s->cc_op = op;
 387}
 388
 389static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
 390                                  TCGv_i64 dst)
 391{
 392    if (live_cc_data(s)) {
 393        tcg_gen_discard_i64(cc_vr);
 394    }
 395    tcg_gen_mov_i64(cc_src, src);
 396    tcg_gen_mov_i64(cc_dst, dst);
 397    s->cc_op = op;
 398}
 399
 400static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
 401                                  TCGv_i64 dst, TCGv_i64 vr)
 402{
 403    tcg_gen_mov_i64(cc_src, src);
 404    tcg_gen_mov_i64(cc_dst, dst);
 405    tcg_gen_mov_i64(cc_vr, vr);
 406    s->cc_op = op;
 407}
 408
 409static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
 410{
 411    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
 412}
 413
 414static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
 415{
 416    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
 417}
 418
 419static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
 420{
 421    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
 422}
 423
 424static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
 425{
 426    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
 427}
 428
 429/* CC value is in env->cc_op */
 430static void set_cc_static(DisasContext *s)
 431{
 432    if (live_cc_data(s)) {
 433        tcg_gen_discard_i64(cc_src);
 434        tcg_gen_discard_i64(cc_dst);
 435        tcg_gen_discard_i64(cc_vr);
 436    }
 437    s->cc_op = CC_OP_STATIC;
 438}
 439
 440/* calculates cc into cc_op */
 441static void gen_op_calc_cc(DisasContext *s)
 442{
 443    TCGv_i32 local_cc_op;
 444    TCGv_i64 dummy;
 445
 446    TCGV_UNUSED_I32(local_cc_op);
 447    TCGV_UNUSED_I64(dummy);
 448    switch (s->cc_op) {
 449    default:
 450        dummy = tcg_const_i64(0);
 451        /* FALLTHRU */
 452    case CC_OP_ADD_64:
 453    case CC_OP_ADDU_64:
 454    case CC_OP_ADDC_64:
 455    case CC_OP_SUB_64:
 456    case CC_OP_SUBU_64:
 457    case CC_OP_SUBB_64:
 458    case CC_OP_ADD_32:
 459    case CC_OP_ADDU_32:
 460    case CC_OP_ADDC_32:
 461    case CC_OP_SUB_32:
 462    case CC_OP_SUBU_32:
 463    case CC_OP_SUBB_32:
 464        local_cc_op = tcg_const_i32(s->cc_op);
 465        break;
 466    case CC_OP_CONST0:
 467    case CC_OP_CONST1:
 468    case CC_OP_CONST2:
 469    case CC_OP_CONST3:
 470    case CC_OP_STATIC:
 471    case CC_OP_DYNAMIC:
 472        break;
 473    }
 474
 475    switch (s->cc_op) {
 476    case CC_OP_CONST0:
 477    case CC_OP_CONST1:
 478    case CC_OP_CONST2:
 479    case CC_OP_CONST3:
 480        /* s->cc_op is the cc value */
 481        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
 482        break;
 483    case CC_OP_STATIC:
 484        /* env->cc_op already is the cc value */
 485        break;
 486    case CC_OP_NZ:
 487    case CC_OP_ABS_64:
 488    case CC_OP_NABS_64:
 489    case CC_OP_ABS_32:
 490    case CC_OP_NABS_32:
 491    case CC_OP_LTGT0_32:
 492    case CC_OP_LTGT0_64:
 493    case CC_OP_COMP_32:
 494    case CC_OP_COMP_64:
 495    case CC_OP_NZ_F32:
 496    case CC_OP_NZ_F64:
 497    case CC_OP_FLOGR:
 498        /* 1 argument */
 499        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
 500        break;
 501    case CC_OP_ICM:
 502    case CC_OP_LTGT_32:
 503    case CC_OP_LTGT_64:
 504    case CC_OP_LTUGTU_32:
 505    case CC_OP_LTUGTU_64:
 506    case CC_OP_TM_32:
 507    case CC_OP_TM_64:
 508    case CC_OP_SLA_32:
 509    case CC_OP_SLA_64:
 510    case CC_OP_NZ_F128:
 511        /* 2 arguments */
 512        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
 513        break;
 514    case CC_OP_ADD_64:
 515    case CC_OP_ADDU_64:
 516    case CC_OP_ADDC_64:
 517    case CC_OP_SUB_64:
 518    case CC_OP_SUBU_64:
 519    case CC_OP_SUBB_64:
 520    case CC_OP_ADD_32:
 521    case CC_OP_ADDU_32:
 522    case CC_OP_ADDC_32:
 523    case CC_OP_SUB_32:
 524    case CC_OP_SUBU_32:
 525    case CC_OP_SUBB_32:
 526        /* 3 arguments */
 527        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
 528        break;
 529    case CC_OP_DYNAMIC:
 530        /* unknown operation - assume 3 arguments and cc_op in env */
 531        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
 532        break;
 533    default:
 534        tcg_abort();
 535    }
 536
 537    if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
 538        tcg_temp_free_i32(local_cc_op);
 539    }
 540    if (!TCGV_IS_UNUSED_I64(dummy)) {
 541        tcg_temp_free_i64(dummy);
 542    }
 543
 544    /* We now have cc in cc_op as constant */
 545    set_cc_static(s);
 546}
 547
 548static int use_goto_tb(DisasContext *s, uint64_t dest)
 549{
 550    /* NOTE: we handle the case where the TB spans two pages here */
 551    return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
 552             || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
 553            && !s->singlestep_enabled
 554            && !(s->tb->cflags & CF_LAST_IO));
 555}
 556
 557static void account_noninline_branch(DisasContext *s, int cc_op)
 558{
 559#ifdef DEBUG_INLINE_BRANCHES
 560    inline_branch_miss[cc_op]++;
 561#endif
 562}
 563
 564static void account_inline_branch(DisasContext *s, int cc_op)
 565{
 566#ifdef DEBUG_INLINE_BRANCHES
 567    inline_branch_hit[cc_op]++;
 568#endif
 569}
 570
 571/* Table of mask values to comparison codes, given a comparison as input.
 572   For such an input, CC=3 should not be possible.  */
 573static const TCGCond ltgt_cond[16] = {
 574    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
 575    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
 576    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
 577    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
 578    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
 579    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
 580    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
 581    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
 582};
 583
 584/* Table of mask values to comparison codes, given a logic op as input.
 585   For such an input, only CC=0 and CC=1 should be possible.  */
 586static const TCGCond nz_cond[16] = {
 587    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
 588    TCG_COND_NEVER, TCG_COND_NEVER,
 589    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
 590    TCG_COND_NE, TCG_COND_NE,
 591    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
 592    TCG_COND_EQ, TCG_COND_EQ,
 593    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
 594    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
 595};
 596
 597/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
 598   details required to generate a TCG comparison.  */
 599static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
 600{
 601    TCGCond cond;
 602    enum cc_op old_cc_op = s->cc_op;
 603
 604    if (mask == 15 || mask == 0) {
 605        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
 606        c->u.s32.a = cc_op;
 607        c->u.s32.b = cc_op;
 608        c->g1 = c->g2 = true;
 609        c->is_64 = false;
 610        return;
 611    }
 612
 613    /* Find the TCG condition for the mask + cc op.  */
 614    switch (old_cc_op) {
 615    case CC_OP_LTGT0_32:
 616    case CC_OP_LTGT0_64:
 617    case CC_OP_LTGT_32:
 618    case CC_OP_LTGT_64:
 619        cond = ltgt_cond[mask];
 620        if (cond == TCG_COND_NEVER) {
 621            goto do_dynamic;
 622        }
 623        account_inline_branch(s, old_cc_op);
 624        break;
 625
 626    case CC_OP_LTUGTU_32:
 627    case CC_OP_LTUGTU_64:
 628        cond = tcg_unsigned_cond(ltgt_cond[mask]);
 629        if (cond == TCG_COND_NEVER) {
 630            goto do_dynamic;
 631        }
 632        account_inline_branch(s, old_cc_op);
 633        break;
 634
 635    case CC_OP_NZ:
 636        cond = nz_cond[mask];
 637        if (cond == TCG_COND_NEVER) {
 638            goto do_dynamic;
 639        }
 640        account_inline_branch(s, old_cc_op);
 641        break;
 642
 643    case CC_OP_TM_32:
 644    case CC_OP_TM_64:
 645        switch (mask) {
 646        case 8:
 647            cond = TCG_COND_EQ;
 648            break;
 649        case 4 | 2 | 1:
 650            cond = TCG_COND_NE;
 651            break;
 652        default:
 653            goto do_dynamic;
 654        }
 655        account_inline_branch(s, old_cc_op);
 656        break;
 657
 658    case CC_OP_ICM:
 659        switch (mask) {
 660        case 8:
 661            cond = TCG_COND_EQ;
 662            break;
 663        case 4 | 2 | 1:
 664        case 4 | 2:
 665            cond = TCG_COND_NE;
 666            break;
 667        default:
 668            goto do_dynamic;
 669        }
 670        account_inline_branch(s, old_cc_op);
 671        break;
 672
 673    case CC_OP_FLOGR:
 674        switch (mask & 0xa) {
 675        case 8: /* src == 0 -> no one bit found */
 676            cond = TCG_COND_EQ;
 677            break;
 678        case 2: /* src != 0 -> one bit found */
 679            cond = TCG_COND_NE;
 680            break;
 681        default:
 682            goto do_dynamic;
 683        }
 684        account_inline_branch(s, old_cc_op);
 685        break;
 686
 687    case CC_OP_ADDU_32:
 688    case CC_OP_ADDU_64:
 689        switch (mask) {
 690        case 8 | 2: /* vr == 0 */
 691            cond = TCG_COND_EQ;
 692            break;
 693        case 4 | 1: /* vr != 0 */
 694            cond = TCG_COND_NE;
 695            break;
 696        case 8 | 4: /* no carry -> vr >= src */
 697            cond = TCG_COND_GEU;
 698            break;
 699        case 2 | 1: /* carry -> vr < src */
 700            cond = TCG_COND_LTU;
 701            break;
 702        default:
 703            goto do_dynamic;
 704        }
 705        account_inline_branch(s, old_cc_op);
 706        break;
 707
 708    case CC_OP_SUBU_32:
 709    case CC_OP_SUBU_64:
 710        /* Note that CC=0 is impossible; treat it as don't-care.  */
 711        switch (mask & 7) {
 712        case 2: /* zero -> op1 == op2 */
 713            cond = TCG_COND_EQ;
 714            break;
 715        case 4 | 1: /* !zero -> op1 != op2 */
 716            cond = TCG_COND_NE;
 717            break;
 718        case 4: /* borrow (!carry) -> op1 < op2 */
 719            cond = TCG_COND_LTU;
 720            break;
 721        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
 722            cond = TCG_COND_GEU;
 723            break;
 724        default:
 725            goto do_dynamic;
 726        }
 727        account_inline_branch(s, old_cc_op);
 728        break;
 729
 730    default:
 731    do_dynamic:
 732        /* Calculate cc value.  */
 733        gen_op_calc_cc(s);
 734        /* FALLTHRU */
 735
 736    case CC_OP_STATIC:
 737        /* Jump based on CC.  We'll load up the real cond below;
 738           the assignment here merely avoids a compiler warning.  */
 739        account_noninline_branch(s, old_cc_op);
 740        old_cc_op = CC_OP_STATIC;
 741        cond = TCG_COND_NEVER;
 742        break;
 743    }
 744
 745    /* Load up the arguments of the comparison.  */
 746    c->is_64 = true;
 747    c->g1 = c->g2 = false;
 748    switch (old_cc_op) {
 749    case CC_OP_LTGT0_32:
 750        c->is_64 = false;
 751        c->u.s32.a = tcg_temp_new_i32();
 752        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
 753        c->u.s32.b = tcg_const_i32(0);
 754        break;
 755    case CC_OP_LTGT_32:
 756    case CC_OP_LTUGTU_32:
 757    case CC_OP_SUBU_32:
 758        c->is_64 = false;
 759        c->u.s32.a = tcg_temp_new_i32();
 760        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
 761        c->u.s32.b = tcg_temp_new_i32();
 762        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
 763        break;
 764
 765    case CC_OP_LTGT0_64:
 766    case CC_OP_NZ:
 767    case CC_OP_FLOGR:
 768        c->u.s64.a = cc_dst;
 769        c->u.s64.b = tcg_const_i64(0);
 770        c->g1 = true;
 771        break;
 772    case CC_OP_LTGT_64:
 773    case CC_OP_LTUGTU_64:
 774    case CC_OP_SUBU_64:
 775        c->u.s64.a = cc_src;
 776        c->u.s64.b = cc_dst;
 777        c->g1 = c->g2 = true;
 778        break;
 779
 780    case CC_OP_TM_32:
 781    case CC_OP_TM_64:
 782    case CC_OP_ICM:
 783        c->u.s64.a = tcg_temp_new_i64();
 784        c->u.s64.b = tcg_const_i64(0);
 785        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
 786        break;
 787
 788    case CC_OP_ADDU_32:
 789        c->is_64 = false;
 790        c->u.s32.a = tcg_temp_new_i32();
 791        c->u.s32.b = tcg_temp_new_i32();
 792        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
 793        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
 794            tcg_gen_movi_i32(c->u.s32.b, 0);
 795        } else {
 796            tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
 797        }
 798        break;
 799
 800    case CC_OP_ADDU_64:
 801        c->u.s64.a = cc_vr;
 802        c->g1 = true;
 803        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
 804            c->u.s64.b = tcg_const_i64(0);
 805        } else {
 806            c->u.s64.b = cc_src;
 807            c->g2 = true;
 808        }
 809        break;
 810
 811    case CC_OP_STATIC:
 812        c->is_64 = false;
 813        c->u.s32.a = cc_op;
 814        c->g1 = true;
 815        switch (mask) {
 816        case 0x8 | 0x4 | 0x2: /* cc != 3 */
 817            cond = TCG_COND_NE;
 818            c->u.s32.b = tcg_const_i32(3);
 819            break;
 820        case 0x8 | 0x4 | 0x1: /* cc != 2 */
 821            cond = TCG_COND_NE;
 822            c->u.s32.b = tcg_const_i32(2);
 823            break;
 824        case 0x8 | 0x2 | 0x1: /* cc != 1 */
 825            cond = TCG_COND_NE;
 826            c->u.s32.b = tcg_const_i32(1);
 827            break;
 828        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
 829            cond = TCG_COND_EQ;
 830            c->g1 = false;
 831            c->u.s32.a = tcg_temp_new_i32();
 832            c->u.s32.b = tcg_const_i32(0);
 833            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
 834            break;
 835        case 0x8 | 0x4: /* cc < 2 */
 836            cond = TCG_COND_LTU;
 837            c->u.s32.b = tcg_const_i32(2);
 838            break;
 839        case 0x8: /* cc == 0 */
 840            cond = TCG_COND_EQ;
 841            c->u.s32.b = tcg_const_i32(0);
 842            break;
 843        case 0x4 | 0x2 | 0x1: /* cc != 0 */
 844            cond = TCG_COND_NE;
 845            c->u.s32.b = tcg_const_i32(0);
 846            break;
 847        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
 848            cond = TCG_COND_NE;
 849            c->g1 = false;
 850            c->u.s32.a = tcg_temp_new_i32();
 851            c->u.s32.b = tcg_const_i32(0);
 852            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
 853            break;
 854        case 0x4: /* cc == 1 */
 855            cond = TCG_COND_EQ;
 856            c->u.s32.b = tcg_const_i32(1);
 857            break;
 858        case 0x2 | 0x1: /* cc > 1 */
 859            cond = TCG_COND_GTU;
 860            c->u.s32.b = tcg_const_i32(1);
 861            break;
 862        case 0x2: /* cc == 2 */
 863            cond = TCG_COND_EQ;
 864            c->u.s32.b = tcg_const_i32(2);
 865            break;
 866        case 0x1: /* cc == 3 */
 867            cond = TCG_COND_EQ;
 868            c->u.s32.b = tcg_const_i32(3);
 869            break;
 870        default:
 871            /* CC is masked by something else: (8 >> cc) & mask.  */
 872            cond = TCG_COND_NE;
 873            c->g1 = false;
 874            c->u.s32.a = tcg_const_i32(8);
 875            c->u.s32.b = tcg_const_i32(0);
 876            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
 877            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
 878            break;
 879        }
 880        break;
 881
 882    default:
 883        abort();
 884    }
 885    c->cond = cond;
 886}
 887
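    /* Free the temporaries of a DisasCompare, skipping operands flagged
       as globals (g1/g2).  */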
 888static void free_compare(DisasCompare *c)
 889{
 890    if (!c->g1) {
 891        if (c->is_64) {
 892            tcg_temp_free_i64(c->u.s64.a);
 893        } else {
 894            tcg_temp_free_i32(c->u.s32.a);
 895        }
 896    }
 897    if (!c->g2) {
 898        if (c->is_64) {
 899            tcg_temp_free_i64(c->u.s64.b);
 900        } else {
 901            tcg_temp_free_i32(c->u.s32.b);
 902        }
 903    }
 904}
 905
 906/* ====================================================================== */
 907/* Define the insn format enumeration.  */
 908#define F0(N)                         FMT_##N,
 909#define F1(N, X1)                     F0(N)
 910#define F2(N, X1, X2)                 F0(N)
 911#define F3(N, X1, X2, X3)             F0(N)
 912#define F4(N, X1, X2, X3, X4)         F0(N)
 913#define F5(N, X1, X2, X3, X4, X5)     F0(N)
 914
 915typedef enum {
 916#include "insn-format.def"
 917} DisasFormat;
 918
 919#undef F0
 920#undef F1
 921#undef F2
 922#undef F3
 923#undef F4
 924#undef F5
 925
 926/* Define a structure to hold the decoded fields.  We'll store each inside
 927   an array indexed by an enum.  In order to conserve memory, we'll arrange
 928   for fields that do not exist at the same time to overlap, thus the "C"
 929   for compact.  For checking purposes there is an "O" for original index
 930   as well that will be applied to availability bitmaps.  */
 931
 932enum DisasFieldIndexO {
 933    FLD_O_r1,
 934    FLD_O_r2,
 935    FLD_O_r3,
 936    FLD_O_m1,
 937    FLD_O_m3,
 938    FLD_O_m4,
 939    FLD_O_b1,
 940    FLD_O_b2,
 941    FLD_O_b4,
 942    FLD_O_d1,
 943    FLD_O_d2,
 944    FLD_O_d4,
 945    FLD_O_x2,
 946    FLD_O_l1,
 947    FLD_O_l2,
 948    FLD_O_i1,
 949    FLD_O_i2,
 950    FLD_O_i3,
 951    FLD_O_i4,
 952    FLD_O_i5
 953};
 954
 955enum DisasFieldIndexC {
 956    FLD_C_r1 = 0,
 957    FLD_C_m1 = 0,
 958    FLD_C_b1 = 0,
 959    FLD_C_i1 = 0,
 960
 961    FLD_C_r2 = 1,
 962    FLD_C_b2 = 1,
 963    FLD_C_i2 = 1,
 964
 965    FLD_C_r3 = 2,
 966    FLD_C_m3 = 2,
 967    FLD_C_i3 = 2,
 968
 969    FLD_C_m4 = 3,
 970    FLD_C_b4 = 3,
 971    FLD_C_i4 = 3,
 972    FLD_C_l1 = 3,
 973
 974    FLD_C_i5 = 4,
 975    FLD_C_d1 = 4,
 976
 977    FLD_C_d2 = 5,
 978
 979    FLD_C_d4 = 6,
 980    FLD_C_x2 = 6,
 981    FLD_C_l2 = 6,
 982
 983    NUM_C_FIELD = 7
 984};
 985
 986struct DisasFields {
 987    unsigned op:8;
 988    unsigned op2:8;
 989    unsigned presentC:16;
 990    unsigned int presentO;
 991    int c[NUM_C_FIELD];
 992};
 993
 994/* This is the way fields are to be accessed out of DisasFields.  */
 995#define have_field(S, F)  have_field1((S), FLD_O_##F)
 996#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
 997
 998static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
 999{
1000    return (f->presentO >> c) & 1;
1001}
1002
1003static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1004                      enum DisasFieldIndexC c)
1005{
1006    assert(have_field1(f, o));
1007    return f->c[c];
1008}
1009
1010/* Describe the layout of each field in each format.  */
1011typedef struct DisasField {
1012    unsigned int beg:8;
1013    unsigned int size:8;
1014    unsigned int type:2;
1015    unsigned int indexC:6;
1016    enum DisasFieldIndexO indexO:8;
1017} DisasField;
1018
1019typedef struct DisasFormatInfo {
1020    DisasField op[NUM_C_FIELD];
1021} DisasFormatInfo;
1022
1023#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
1024#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
1025#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1026                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1027#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1028                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
1029                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1030#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1031                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1032#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
1033                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
1034                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1035#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
1036#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
1037
1038#define F0(N)                     { { } },
1039#define F1(N, X1)                 { { X1 } },
1040#define F2(N, X1, X2)             { { X1, X2 } },
1041#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
1042#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
1043#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1044
1045static const DisasFormatInfo format_info[] = {
1046#include "insn-format.def"
1047};
1048
1049#undef F0
1050#undef F1
1051#undef F2
1052#undef F3
1053#undef F4
1054#undef F5
1055#undef R
1056#undef M
1057#undef BD
1058#undef BXD
1059#undef BDL
1060#undef BXDL
1061#undef I
1062#undef L
1063
1064/* Generally, we'll extract operands into this structure, operate upon
1065   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
1066   of routines below for more details.  */
1067typedef struct {
1068    bool g_out, g_out2, g_in1, g_in2;
1069    TCGv_i64 out, out2, in1, in2;
1070    TCGv_i64 addr1;
1071} DisasOps;
1072
1073/* Instructions can place constraints on their operands, raising specification
1074   exceptions if they are violated.  To make this easy to automate, each "in1",
1075   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1076   of the following, or 0.  To make this easy to document, we'll put the
1077   SPEC_<name> defines next to <name>.  */
1078
1079#define SPEC_r1_even    1
1080#define SPEC_r2_even    2
1081#define SPEC_r3_even    4
1082#define SPEC_r1_f128    8
1083#define SPEC_r2_f128    16
1084
1085/* Return values from translate_one, indicating the state of the TB.  */
1086typedef enum {
1087    /* Continue the TB.  */
1088    NO_EXIT,
1089    /* We have emitted one or more goto_tb.  No fixup required.  */
1090    EXIT_GOTO_TB,
1091    /* We are not using a goto_tb (for whatever reason), but have updated
1092       the PC (for whatever reason), so there's no need to do it again on
1093       exiting the TB.  */
1094    EXIT_PC_UPDATED,
1095    /* We are exiting the TB, but have neither emitted a goto_tb, nor
1096       updated the PC for the next instruction to be executed.  */
1097    EXIT_PC_STALE,
1098    /* We are ending the TB with a noreturn function call, e.g. longjmp.
1099       No following code will be executed.  */
1100    EXIT_NORETURN,
1101} ExitStatus;
1102
1103typedef enum DisasFacility {
1104    FAC_Z,                  /* zarch (default) */
1105    FAC_CASS,               /* compare and swap and store */
1106    FAC_CASS2,              /* compare and swap and store 2 */
1107    FAC_DFP,                /* decimal floating point */
1108    FAC_DFPR,               /* decimal floating point rounding */
1109    FAC_DO,                 /* distinct operands */
1110    FAC_EE,                 /* execute extensions */
1111    FAC_EI,                 /* extended immediate */
1112    FAC_FPE,                /* floating point extension */
1113    FAC_FPSSH,              /* floating point support sign handling */
1114    FAC_FPRGR,              /* FPR-GR transfer */
1115    FAC_GIE,                /* general instructions extension */
1116    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
1117    FAC_HW,                 /* high-word */
1118    FAC_IEEEE_SIM,          /* IEEE exception simulation */
1119    FAC_LOC,                /* load/store on condition */
1120    FAC_LD,                 /* long displacement */
1121    FAC_PC,                 /* population count */
1122    FAC_SCF,                /* store clock fast */
1123    FAC_SFLE,               /* store facility list extended */
1124} DisasFacility;
1125
1126struct DisasInsn {
1127    unsigned opc:16;
1128    DisasFormat fmt:8;
1129    DisasFacility fac:8;
1130    unsigned spec:8;
1131
1132    const char *name;
1133
1134    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1135    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1136    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1137    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1138    void (*help_cout)(DisasContext *, DisasOps *);
1139    ExitStatus (*help_op)(DisasContext *, DisasOps *);
1140
1141    uint64_t data;
1142};
1143
1144/* ====================================================================== */
1145/* Miscellaneous helpers, used by several operations.  */
1146
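    /* Compute the shift count for a shift instruction from B2 and D2,
       masked by MASK (the maximum shift amount).  */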
1147static void help_l2_shift(DisasContext *s, DisasFields *f,
1148                          DisasOps *o, int mask)
1149{
1150    int b2 = get_field(f, b2);
1151    int d2 = get_field(f, d2);
1152
1153    if (b2 == 0) {
1154        o->in2 = tcg_const_i64(d2 & mask);
1155    } else {
1156        o->in2 = get_address(s, 0, b2, d2);
1157        tcg_gen_andi_i64(o->in2, o->in2, mask);
1158    }
1159}
1160
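    /* Emit an unconditional branch to DEST, using goto_tb when the
       destination can be reached that way.  */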
1161static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1162{
1163    if (dest == s->next_pc) {
1164        return NO_EXIT;
1165    }
1166    if (use_goto_tb(s, dest)) {
1167        update_cc_op(s);
1168        tcg_gen_goto_tb(0);
1169        tcg_gen_movi_i64(psw_addr, dest);
1170        tcg_gen_exit_tb((tcg_target_long)s->tb);
1171        return EXIT_GOTO_TB;
1172    } else {
1173        tcg_gen_movi_i64(psw_addr, dest);
1174        return EXIT_PC_UPDATED;
1175    }
1176}
1177
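    /* Emit a conditional branch based on C.  The destination is either
       S->PC + 2 * IMM (when IS_IMM) or the address in CDEST.  */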
1178static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1179                              bool is_imm, int imm, TCGv_i64 cdest)
1180{
1181    ExitStatus ret;
1182    uint64_t dest = s->pc + 2 * imm;
1183    int lab;
1184
1185    /* Take care of the special cases first.  */
1186    if (c->cond == TCG_COND_NEVER) {
1187        ret = NO_EXIT;
1188        goto egress;
1189    }
1190    if (is_imm) {
1191        if (dest == s->next_pc) {
1192            /* Branch to next.  */
1193            ret = NO_EXIT;
1194            goto egress;
1195        }
1196        if (c->cond == TCG_COND_ALWAYS) {
1197            ret = help_goto_direct(s, dest);
1198            goto egress;
1199        }
1200    } else {
1201        if (TCGV_IS_UNUSED_I64(cdest)) {
1202            /* E.g. bcr %r0 -> no branch.  */
1203            ret = NO_EXIT;
1204            goto egress;
1205        }
1206        if (c->cond == TCG_COND_ALWAYS) {
1207            tcg_gen_mov_i64(psw_addr, cdest);
1208            ret = EXIT_PC_UPDATED;
1209            goto egress;
1210        }
1211    }
1212
1213    if (use_goto_tb(s, s->next_pc)) {
1214        if (is_imm && use_goto_tb(s, dest)) {
1215            /* Both exits can use goto_tb.  */
1216            update_cc_op(s);
1217
1218            lab = gen_new_label();
1219            if (c->is_64) {
1220                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1221            } else {
1222                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1223            }
1224
1225            /* Branch not taken.  */
1226            tcg_gen_goto_tb(0);
1227            tcg_gen_movi_i64(psw_addr, s->next_pc);
1228            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
1229
1230            /* Branch taken.  */
1231            gen_set_label(lab);
1232            tcg_gen_goto_tb(1);
1233            tcg_gen_movi_i64(psw_addr, dest);
1234            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
1235
1236            ret = EXIT_GOTO_TB;
1237        } else {
1238            /* Fallthru can use goto_tb, but taken branch cannot.  */
1239            /* Store taken branch destination before the brcond.  This
1240               avoids having to allocate a new local temp to hold it.
1241               We'll overwrite this in the not taken case anyway.  */
1242            if (!is_imm) {
1243                tcg_gen_mov_i64(psw_addr, cdest);
1244            }
1245
1246            lab = gen_new_label();
1247            if (c->is_64) {
1248                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1249            } else {
1250                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1251            }
1252
1253            /* Branch not taken.  */
1254            update_cc_op(s);
1255            tcg_gen_goto_tb(0);
1256            tcg_gen_movi_i64(psw_addr, s->next_pc);
1257            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
1258
1259            gen_set_label(lab);
1260            if (is_imm) {
1261                tcg_gen_movi_i64(psw_addr, dest);
1262            }
1263            ret = EXIT_PC_UPDATED;
1264        }
1265    } else {
1266        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
1267           Most commonly we're single-stepping or some other condition that
1268           disables all use of goto_tb.  Just update the PC and exit.  */
1269
1270        TCGv_i64 next = tcg_const_i64(s->next_pc);
1271        if (is_imm) {
1272            cdest = tcg_const_i64(dest);
1273        }
1274
1275        if (c->is_64) {
1276            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1277                                cdest, next);
1278        } else {
1279            TCGv_i32 t0 = tcg_temp_new_i32();
1280            TCGv_i64 t1 = tcg_temp_new_i64();
1281            TCGv_i64 z = tcg_const_i64(0);
1282            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1283            tcg_gen_extu_i32_i64(t1, t0);
1284            tcg_temp_free_i32(t0);
1285            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1286            tcg_temp_free_i64(t1);
1287            tcg_temp_free_i64(z);
1288        }
1289
1290        if (is_imm) {
1291            tcg_temp_free_i64(cdest);
1292        }
1293        tcg_temp_free_i64(next);
1294
1295        ret = EXIT_PC_UPDATED;
1296    }
1297
1298 egress:
1299    free_compare(c);
1300    return ret;
1301}
1302
1303/* ====================================================================== */
1304/* The operations.  These perform the bulk of the work for any insn,
1305   usually after the operands have been loaded and output initialized.  */
1306
1307static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1308{
1309    gen_helper_abs_i64(o->out, o->in2);
1310    return NO_EXIT;
1311}
1312
1313static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1314{
1315    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1316    return NO_EXIT;
1317}
1318
1319static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1320{
1321    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1322    return NO_EXIT;
1323}
1324
1325static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1326{
1327    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1328    tcg_gen_mov_i64(o->out2, o->in2);
1329    return NO_EXIT;
1330}
1331
1332static ExitStatus op_add(DisasContext *s, DisasOps *o)
1333{
1334    tcg_gen_add_i64(o->out, o->in1, o->in2);
1335    return NO_EXIT;
1336}
1337
1338static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1339{
1340    DisasCompare cmp;
1341    TCGv_i64 carry;
1342
1343    tcg_gen_add_i64(o->out, o->in1, o->in2);
1344
1345    /* The carry flag is the msb of CC, therefore the branch mask that would
1346       create that comparison is 3.  Feeding the generated comparison to
1347       setcond produces the carry flag that we desire.  */
1348    disas_jcc(s, &cmp, 3);
1349    carry = tcg_temp_new_i64();
1350    if (cmp.is_64) {
1351        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1352    } else {
1353        TCGv_i32 t = tcg_temp_new_i32();
1354        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1355        tcg_gen_extu_i32_i64(carry, t);
1356        tcg_temp_free_i32(t);
1357    }
1358    free_compare(&cmp);
1359
1360    tcg_gen_add_i64(o->out, o->out, carry);
1361    tcg_temp_free_i64(carry);
1362    return NO_EXIT;
1363}
1364
1365static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1366{
1367    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1368    return NO_EXIT;
1369}
1370
1371static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1372{
1373    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1374    return NO_EXIT;
1375}
1376
1377static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1378{
1379    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1380    return_low128(o->out2);
1381    return NO_EXIT;
1382}
1383
1384static ExitStatus op_and(DisasContext *s, DisasOps *o)
1385{
1386    tcg_gen_and_i64(o->out, o->in1, o->in2);
1387    return NO_EXIT;
1388}
1389
1390static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1391{
1392    int shift = s->insn->data & 0xff;
1393    int size = s->insn->data >> 8;
1394    uint64_t mask = ((1ull << size) - 1) << shift;
1395
1396    assert(!o->g_in2);
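        /* Move the immediate into position and set every bit outside the
           field, so the AND below only affects the selected field.  */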
1397    tcg_gen_shli_i64(o->in2, o->in2, shift);
1398    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1399    tcg_gen_and_i64(o->out, o->in1, o->in2);
1400
1401    /* Produce the CC from only the bits manipulated.  */
1402    tcg_gen_andi_i64(cc_dst, o->out, mask);
1403    set_cc_nz_u64(s, cc_dst);
1404    return NO_EXIT;
1405}
1406
1407static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1408{
1409    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1410    if (!TCGV_IS_UNUSED_I64(o->in2)) {
1411        tcg_gen_mov_i64(psw_addr, o->in2);
1412        return EXIT_PC_UPDATED;
1413    } else {
1414        return NO_EXIT;
1415    }
1416}
1417
1418static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1419{
1420    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1421    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1422}
1423
1424static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1425{
1426    int m1 = get_field(s->fields, m1);
1427    bool is_imm = have_field(s->fields, i2);
1428    int imm = is_imm ? get_field(s->fields, i2) : 0;
1429    DisasCompare c;
1430
1431    disas_jcc(s, &c, m1);
1432    return help_branch(s, &c, is_imm, imm, o->in2);
1433}
1434
1435static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1436{
1437    int r1 = get_field(s->fields, r1);
1438    bool is_imm = have_field(s->fields, i2);
1439    int imm = is_imm ? get_field(s->fields, i2) : 0;
1440    DisasCompare c;
1441    TCGv_i64 t;
1442
1443    c.cond = TCG_COND_NE;
1444    c.is_64 = false;
1445    c.g1 = false;
1446    c.g2 = false;
1447
1448    t = tcg_temp_new_i64();
1449    tcg_gen_subi_i64(t, regs[r1], 1);
1450    store_reg32_i64(r1, t);
1451    c.u.s32.a = tcg_temp_new_i32();
1452    c.u.s32.b = tcg_const_i32(0);
1453    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1454    tcg_temp_free_i64(t);
1455
1456    return help_branch(s, &c, is_imm, imm, o->in2);
1457}
1458
1459static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1460{
1461    int r1 = get_field(s->fields, r1);
1462    bool is_imm = have_field(s->fields, i2);
1463    int imm = is_imm ? get_field(s->fields, i2) : 0;
1464    DisasCompare c;
1465
1466    c.cond = TCG_COND_NE;
1467    c.is_64 = true;
1468    c.g1 = true;
1469    c.g2 = false;
1470
1471    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1472    c.u.s64.a = regs[r1];
1473    c.u.s64.b = tcg_const_i64(0);
1474
1475    return help_branch(s, &c, is_imm, imm, o->in2);
1476}
1477
1478static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1479{
1480    int r1 = get_field(s->fields, r1);
1481    int r3 = get_field(s->fields, r3);
1482    bool is_imm = have_field(s->fields, i2);
1483    int imm = is_imm ? get_field(s->fields, i2) : 0;
1484    DisasCompare c;
1485    TCGv_i64 t;
1486
1487    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1488    c.is_64 = false;
1489    c.g1 = false;
1490    c.g2 = false;
1491
1492    t = tcg_temp_new_i64();
1493    tcg_gen_add_i64(t, regs[r1], regs[r3]);
1494    c.u.s32.a = tcg_temp_new_i32();
1495    c.u.s32.b = tcg_temp_new_i32();
1496    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1497    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
1498    store_reg32_i64(r1, t);
1499    tcg_temp_free_i64(t);
1500
1501    return help_branch(s, &c, is_imm, imm, o->in2);
1502}
1503
1504static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1505{
1506    int r1 = get_field(s->fields, r1);
1507    int r3 = get_field(s->fields, r3);
1508    bool is_imm = have_field(s->fields, i2);
1509    int imm = is_imm ? get_field(s->fields, i2) : 0;
1510    DisasCompare c;
1511
1512    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1513    c.is_64 = true;
1514
1515    if (r1 == (r3 | 1)) {
1516        c.u.s64.b = load_reg(r3 | 1);
1517        c.g2 = false;
1518    } else {
1519        c.u.s64.b = regs[r3 | 1];
1520        c.g2 = true;
1521    }
1522
1523    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1524    c.u.s64.a = regs[r1];
1525    c.g1 = true;
1526
1527    return help_branch(s, &c, is_imm, imm, o->in2);
1528}
1529
1530static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1531{
1532    int imm, m3 = get_field(s->fields, m3);
1533    bool is_imm;
1534    DisasCompare c;
1535
1536    c.cond = ltgt_cond[m3];
1537    if (s->insn->data) {
1538        c.cond = tcg_unsigned_cond(c.cond);
1539    }
1540    c.is_64 = c.g1 = c.g2 = true;
1541    c.u.s64.a = o->in1;
1542    c.u.s64.b = o->in2;
1543
1544    is_imm = have_field(s->fields, i4);
1545    if (is_imm) {
1546        imm = get_field(s->fields, i4);
1547    } else {
1548        imm = 0;
1549        o->out = get_address(s, 0, get_field(s->fields, b4),
1550                             get_field(s->fields, d4));
1551    }
1552
1553    return help_branch(s, &c, is_imm, imm, o->out);
1554}
1555
1556static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1557{
1558    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1559    set_cc_static(s);
1560    return NO_EXIT;
1561}
1562
1563static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1564{
1565    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1566    set_cc_static(s);
1567    return NO_EXIT;
1568}
1569
1570static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1571{
1572    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1573    set_cc_static(s);
1574    return NO_EXIT;
1575}
1576
1577static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1578{
1579    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1580    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1581    tcg_temp_free_i32(m3);
1582    gen_set_cc_nz_f32(s, o->in2);
1583    return NO_EXIT;
1584}
1585
1586static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1587{
1588    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1589    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1590    tcg_temp_free_i32(m3);
1591    gen_set_cc_nz_f64(s, o->in2);
1592    return NO_EXIT;
1593}
1594
1595static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1596{
1597    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1598    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1599    tcg_temp_free_i32(m3);
1600    gen_set_cc_nz_f128(s, o->in1, o->in2);
1601    return NO_EXIT;
1602}
1603
1604static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1605{
1606    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1607    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1608    tcg_temp_free_i32(m3);
1609    gen_set_cc_nz_f32(s, o->in2);
1610    return NO_EXIT;
1611}
1612
1613static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1614{
1615    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1616    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1617    tcg_temp_free_i32(m3);
1618    gen_set_cc_nz_f64(s, o->in2);
1619    return NO_EXIT;
1620}
1621
1622static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1623{
1624    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1625    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1626    tcg_temp_free_i32(m3);
1627    gen_set_cc_nz_f128(s, o->in1, o->in2);
1628    return NO_EXIT;
1629}
1630
1631static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1632{
1633    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1634    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1635    tcg_temp_free_i32(m3);
1636    gen_set_cc_nz_f32(s, o->in2);
1637    return NO_EXIT;
1638}
1639
1640static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1641{
1642    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1643    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1644    tcg_temp_free_i32(m3);
1645    gen_set_cc_nz_f64(s, o->in2);
1646    return NO_EXIT;
1647}
1648
1649static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1650{
1651    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1652    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1653    tcg_temp_free_i32(m3);
1654    gen_set_cc_nz_f128(s, o->in1, o->in2);
1655    return NO_EXIT;
1656}
1657
1658static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1659{
1660    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1661    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1662    tcg_temp_free_i32(m3);
1663    gen_set_cc_nz_f32(s, o->in2);
1664    return NO_EXIT;
1665}
1666
1667static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1668{
1669    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1670    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1671    tcg_temp_free_i32(m3);
1672    gen_set_cc_nz_f64(s, o->in2);
1673    return NO_EXIT;
1674}
1675
1676static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1677{
1678    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1679    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1680    tcg_temp_free_i32(m3);
1681    gen_set_cc_nz_f128(s, o->in1, o->in2);
1682    return NO_EXIT;
1683}
1684
1685static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1686{
1687    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1688    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1689    tcg_temp_free_i32(m3);
1690    return NO_EXIT;
1691}
1692
1693static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1694{
1695    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1696    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1697    tcg_temp_free_i32(m3);
1698    return NO_EXIT;
1699}
1700
1701static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1702{
1703    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1704    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1705    tcg_temp_free_i32(m3);
1706    return_low128(o->out2);
1707    return NO_EXIT;
1708}
1709
1710static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1711{
1712    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1713    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1714    tcg_temp_free_i32(m3);
1715    return NO_EXIT;
1716}
1717
1718static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1719{
1720    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1721    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1722    tcg_temp_free_i32(m3);
1723    return NO_EXIT;
1724}
1725
1726static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1727{
1728    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1729    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1730    tcg_temp_free_i32(m3);
1731    return_low128(o->out2);
1732    return NO_EXIT;
1733}
1734
1735static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1736{
1737    int r2 = get_field(s->fields, r2);
1738    TCGv_i64 len = tcg_temp_new_i64();
1739
1740    potential_page_fault(s);
1741    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1742    set_cc_static(s);
1743    return_low128(o->out);
1744
1745    tcg_gen_add_i64(regs[r2], regs[r2], len);
1746    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1747    tcg_temp_free_i64(len);
1748
1749    return NO_EXIT;
1750}
1751
1752static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1753{
1754    int l = get_field(s->fields, l1);
1755    TCGv_i32 vl;
1756
1757    switch (l + 1) {
1758    case 1:
1759        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1760        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1761        break;
1762    case 2:
1763        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1764        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1765        break;
1766    case 4:
1767        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1768        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1769        break;
1770    case 8:
1771        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1772        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1773        break;
1774    default:
1775        potential_page_fault(s);
1776        vl = tcg_const_i32(l);
1777        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1778        tcg_temp_free_i32(vl);
1779        set_cc_static(s);
1780        return NO_EXIT;
1781    }
1782    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1783    return NO_EXIT;
1784}
1785
1786static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1787{
1788    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1789    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1790    potential_page_fault(s);
1791    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1792    tcg_temp_free_i32(r1);
1793    tcg_temp_free_i32(r3);
1794    set_cc_static(s);
1795    return NO_EXIT;
1796}
1797
1798static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1799{
1800    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1801    TCGv_i32 t1 = tcg_temp_new_i32();
1802    tcg_gen_trunc_i64_i32(t1, o->in1);
1803    potential_page_fault(s);
1804    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1805    set_cc_static(s);
1806    tcg_temp_free_i32(t1);
1807    tcg_temp_free_i32(m3);
1808    return NO_EXIT;
1809}
1810
1811static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1812{
1813    potential_page_fault(s);
1814    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1815    set_cc_static(s);
1816    return_low128(o->in2);
1817    return NO_EXIT;
1818}
1819
1820static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1821{
1822    TCGv_i64 t = tcg_temp_new_i64();
1823    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1824    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1825    tcg_gen_or_i64(o->out, o->out, t);
1826    tcg_temp_free_i64(t);
1827    return NO_EXIT;
1828}
1829
1830static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1831{
1832    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
1833    int d2 = get_field(s->fields, d2);
1834    int b2 = get_field(s->fields, b2);
1835    int is_64 = s->insn->data;
1836    TCGv_i64 addr, mem, cc, z;
1837
1838    /* Note that in1 = R3 (new value) and
1839       in2 = (zero-extended) R1 (expected value).  */
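        /* Illustrative sketch of the sequence emitted below (not atomic,
           as the FIXME above notes):
               old   = *addr;
               cc    = (old != expected);
               *addr = cc ? old : new;    -- the store is unconditional
               R1    = old;
           where "expected" is in2 and "new" is in1.  */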
1840
1841    /* Load the memory into the (temporary) output.  While the PoO only talks
1842       about moving the memory to R1 on inequality, if we include equality it
1843       means that R1 is equal to the memory in all conditions.  */
1844    addr = get_address(s, 0, b2, d2);
1845    if (is_64) {
1846        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
1847    } else {
1848        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
1849    }
1850
1851    /* Are the memory and expected values (un)equal?  Note that this setcond
1852       produces the output CC value, thus the NE sense of the test.  */
1853    cc = tcg_temp_new_i64();
1854    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1855
1856    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1857       Recall that we are allowed to unconditionally issue the store (and
1858       thus any possible write trap), so (re-)store the original contents
1859       of MEM in case of inequality.  */
1860    z = tcg_const_i64(0);
1861    mem = tcg_temp_new_i64();
1862    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
1863    if (is_64) {
1864        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
1865    } else {
1866        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
1867    }
1868    tcg_temp_free_i64(z);
1869    tcg_temp_free_i64(mem);
1870    tcg_temp_free_i64(addr);
1871
1872    /* Store CC back to cc_op.  Wait until after the store so that any
1873       exception gets the old cc_op value.  */
1874    tcg_gen_trunc_i64_i32(cc_op, cc);
1875    tcg_temp_free_i64(cc);
1876    set_cc_static(s);
1877    return NO_EXIT;
1878}
1879
1880static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1881{
1882    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY.  */
1883    int r1 = get_field(s->fields, r1);
1884    int r3 = get_field(s->fields, r3);
1885    int d2 = get_field(s->fields, d2);
1886    int b2 = get_field(s->fields, b2);
1887    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
1888
1889    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
1890
1891    addrh = get_address(s, 0, b2, d2);
1892    addrl = get_address(s, 0, b2, d2 + 8);
1893    outh = tcg_temp_new_i64();
1894    outl = tcg_temp_new_i64();
1895
1896    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
1897    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
1898
1899    /* Fold the double-word compare with arithmetic.  */
1900    cc = tcg_temp_new_i64();
1901    z = tcg_temp_new_i64();
1902    tcg_gen_xor_i64(cc, outh, regs[r1]);
1903    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
1904    tcg_gen_or_i64(cc, cc, z);
1905    tcg_gen_movi_i64(z, 0);
1906    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
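        /* In other words (illustrative):
               cc = ((outh ^ R1) | (outl ^ R1+1)) != 0
           so cc is 0 exactly when both halves of the 128-bit comparands
           match, and 1 otherwise, which is the CC that CDSG sets.  */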
1907
1908    memh = tcg_temp_new_i64();
1909    meml = tcg_temp_new_i64();
1910    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
1911    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
1912    tcg_temp_free_i64(z);
1913
1914    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
1915    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
1916    tcg_temp_free_i64(memh);
1917    tcg_temp_free_i64(meml);
1918    tcg_temp_free_i64(addrh);
1919    tcg_temp_free_i64(addrl);
1920
1921    /* Save back state now that we've passed all exceptions.  */
1922    tcg_gen_mov_i64(regs[r1], outh);
1923    tcg_gen_mov_i64(regs[r1 + 1], outl);
1924    tcg_gen_trunc_i64_i32(cc_op, cc);
1925    tcg_temp_free_i64(outh);
1926    tcg_temp_free_i64(outl);
1927    tcg_temp_free_i64(cc);
1928    set_cc_static(s);
1929    return NO_EXIT;
1930}
1931
1932#ifndef CONFIG_USER_ONLY
1933static ExitStatus op_csp(DisasContext *s, DisasOps *o)
1934{
1935    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1936    check_privileged(s);
1937    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
1938    tcg_temp_free_i32(r1);
1939    set_cc_static(s);
1940    return NO_EXIT;
1941}
1942#endif
1943
1944static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
1945{
1946    TCGv_i64 t1 = tcg_temp_new_i64();
1947    TCGv_i32 t2 = tcg_temp_new_i32();
1948    tcg_gen_trunc_i64_i32(t2, o->in1);
1949    gen_helper_cvd(t1, t2);
1950    tcg_temp_free_i32(t2);
1951    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
1952    tcg_temp_free_i64(t1);
1953    return NO_EXIT;
1954}
1955
1956static ExitStatus op_ct(DisasContext *s, DisasOps *o)
1957{
1958    int m3 = get_field(s->fields, m3);
1959    int lab = gen_new_label();
1960    TCGv_i32 t;
1961    TCGCond c;
1962
1963    c = tcg_invert_cond(ltgt_cond[m3]);
1964    if (s->insn->data) {
1965        c = tcg_unsigned_cond(c);
1966    }
1967    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
1968
1969    /* Set DXC to 0xff.  (The DXC is byte 2 of the FPC, hence 0xff00.)  */
1970    t = tcg_temp_new_i32();
1971    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
1972    tcg_gen_ori_i32(t, t, 0xff00);
1973    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
1974    tcg_temp_free_i32(t);
1975
1976    /* Trap.  */
1977    gen_program_exception(s, PGM_DATA);
1978
1979    gen_set_label(lab);
1980    return NO_EXIT;
1981}
1982
1983#ifndef CONFIG_USER_ONLY
1984static ExitStatus op_diag(DisasContext *s, DisasOps *o)
1985{
1986    TCGv_i32 tmp;
1987
1988    check_privileged(s);
1989    potential_page_fault(s);
1990
1991    /* We pretend the format is RX_a so that D2 is the field we want.  */
1992    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
1993    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
1994    tcg_temp_free_i32(tmp);
1995    return NO_EXIT;
1996}
1997#endif
1998
1999static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2000{
2001    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2002    return_low128(o->out);
2003    return NO_EXIT;
2004}
2005
2006static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2007{
2008    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2009    return_low128(o->out);
2010    return NO_EXIT;
2011}
2012
2013static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2014{
2015    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2016    return_low128(o->out);
2017    return NO_EXIT;
2018}
2019
2020static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2021{
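        /* For this logical divide the 128-bit dividend is the register pair
           R1:R1+1, which is why the helper consumes o->out and o->out2 (the
           pair about to be overwritten) rather than o->in1.  */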
2022    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2023    return_low128(o->out);
2024    return NO_EXIT;
2025}
2026
2027static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2028{
2029    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2030    return NO_EXIT;
2031}
2032
2033static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2034{
2035    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2036    return NO_EXIT;
2037}
2038
2039static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2040{
2041    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2042    return_low128(o->out2);
2043    return NO_EXIT;
2044}
2045
2046static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2047{
2048    int r2 = get_field(s->fields, r2);
2049    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2050    return NO_EXIT;
2051}
2052
2053static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2054{
2055    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2056    return NO_EXIT;
2057}
2058
2059static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2060{
2061    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2062       tb->flags, (ab)use the tb->cs_base field as the address of
2063       the template in memory, and grab 8 bits of tb->flags/cflags for
2064       the contents of the register.  We would then recognize all this
2065       in gen_intermediate_code_internal, generating code for exactly
2066       one instruction.  This new TB then gets executed normally.
2067
2068       On the other hand, this seems to be mostly used for modifying
2069       MVC inside of memcpy, which needs a helper call anyway.  So
2070       perhaps this doesn't bear thinking about any further.  */
2071
2072    TCGv_i64 tmp;
2073
2074    update_psw_addr(s);
2075    update_cc_op(s);
2076
2077    tmp = tcg_const_i64(s->next_pc);
2078    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2079    tcg_temp_free_i64(tmp);
2080
2081    set_cc_static(s);
2082    return NO_EXIT;
2083}
2084
2085static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2086{
2087    /* We'll use the original input for cc computation, since we get to
2088       compare that against 0, which ought to be better than comparing
2089       the real output against 64.  It also lets cc_dst be a convenient
2090       temporary during our computation.  */
2091    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2092
2093    /* R1 = IN ? CLZ(IN) : 64.  */
2094    gen_helper_clz(o->out, o->in2);
2095
2096    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2097       value by 64, which is undefined.  But since the shift is 64 iff the
2098       input is zero, we still get the correct result after and'ing.  */
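        /* Worked example (illustrative): for IN = 0x0000000100000001,
           CLZ(IN) = 31 so R1 = 31; the found-bit mask below becomes
           0x8000000000000000 >> 31 = 0x0000000100000000, and
           R1+1 = IN & ~mask = 0x0000000000000001, i.e. the input with
           its leftmost one bit cleared.  */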
2099    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2100    tcg_gen_shr_i64(o->out2, o->out2, o->out);
2101    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2102    return NO_EXIT;
2103}
2104
2105static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2106{
2107    int m3 = get_field(s->fields, m3);
2108    int pos, len, base = s->insn->data;
2109    TCGv_i64 tmp = tcg_temp_new_i64();
2110    uint64_t ccm;
2111
2112    switch (m3) {
2113    case 0xf:
2114        /* Effectively a 32-bit load.  */
2115        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2116        len = 32;
2117        goto one_insert;
2118
2119    case 0xc:
2120    case 0x6:
2121    case 0x3:
2122        /* Effectively a 16-bit load.  */
2123        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2124        len = 16;
2125        goto one_insert;
2126
2127    case 0x8:
2128    case 0x4:
2129    case 0x2:
2130    case 0x1:
2131        /* Effectively an 8-bit load.  */
2132        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2133        len = 8;
2134        goto one_insert;
2135
2136    one_insert:
2137        pos = base + ctz32(m3) * 8;
2138        tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2139        ccm = ((1ull << len) - 1) << pos;
2140        break;
2141
2142    default:
2143        /* This is going to be a sequence of loads and inserts.  */
2144        pos = base + 32 - 8;
2145        ccm = 0;
2146        while (m3) {
2147            if (m3 & 0x8) {
2148                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2149                tcg_gen_addi_i64(o->in2, o->in2, 1);
2150                tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2151                ccm |= 0xffull << pos;
2152            }
2153            m3 = (m3 << 1) & 0xf;
2154            pos -= 8;
2155        }
2156        break;
2157    }
2158
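        /* CCM is the mask of the bits just inserted; the condition code is
           then computed from only those bits of the result.  Illustrative
           example for the loop above: ICM with M3 = 0xa (binary 1010) and
           base 0 inserts bytes at host bit positions 24-31 and 8-15, giving
           ccm = 0xff00ff00.  */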
2159    tcg_gen_movi_i64(tmp, ccm);
2160    gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2161    tcg_temp_free_i64(tmp);
2162    return NO_EXIT;
2163}
2164
2165static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2166{
2167    int shift = s->insn->data & 0xff;
2168    int size = s->insn->data >> 8;
2169    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2170    return NO_EXIT;
2171}
2172
2173static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2174{
2175    TCGv_i64 t1;
2176
2177    gen_op_calc_cc(s);
2178    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
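        /* IPM is defined to place the condition code in bits 34-35 and the
           program mask in bits 36-39 of R1 (PoO numbering), i.e. host bits
           28-29 and 24-27; the shifts below assemble those fields from
           psw_mask and cc_op.  */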
2179
2180    t1 = tcg_temp_new_i64();
2181    tcg_gen_shli_i64(t1, psw_mask, 20);
2182    tcg_gen_shri_i64(t1, t1, 36);
2183    tcg_gen_or_i64(o->out, o->out, t1);
2184
2185    tcg_gen_extu_i32_i64(t1, cc_op);
2186    tcg_gen_shli_i64(t1, t1, 28);
2187    tcg_gen_or_i64(o->out, o->out, t1);
2188    tcg_temp_free_i64(t1);
2189    return NO_EXIT;
2190}
2191
2192#ifndef CONFIG_USER_ONLY
2193static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2194{
2195    check_privileged(s);
2196    gen_helper_ipte(cpu_env, o->in1, o->in2);
2197    return NO_EXIT;
2198}
2199
2200static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2201{
2202    check_privileged(s);
2203    gen_helper_iske(o->out, cpu_env, o->in2);
2204    return NO_EXIT;
2205}
2206#endif
2207
2208static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2209{
2210    gen_helper_ldeb(o->out, cpu_env, o->in2);
2211    return NO_EXIT;
2212}
2213
2214static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2215{
2216    gen_helper_ledb(o->out, cpu_env, o->in2);
2217    return NO_EXIT;
2218}
2219
2220static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2221{
2222    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2223    return NO_EXIT;
2224}
2225
2226static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2227{
2228    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2229    return NO_EXIT;
2230}
2231
2232static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2233{
2234    gen_helper_lxdb(o->out, cpu_env, o->in2);
2235    return_low128(o->out2);
2236    return NO_EXIT;
2237}
2238
2239static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2240{
2241    gen_helper_lxeb(o->out, cpu_env, o->in2);
2242    return_low128(o->out2);
2243    return NO_EXIT;
2244}
2245
2246static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2247{
2248    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2249    return NO_EXIT;
2250}
2251
2252static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2253{
2254    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2255    return NO_EXIT;
2256}
2257
2258static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2259{
2260    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2261    return NO_EXIT;
2262}
2263
2264static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2265{
2266    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2267    return NO_EXIT;
2268}
2269
2270static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2271{
2272    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2273    return NO_EXIT;
2274}
2275
2276static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2277{
2278    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2279    return NO_EXIT;
2280}
2281
2282static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2283{
2284    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2285    return NO_EXIT;
2286}
2287
2288static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2289{
2290    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2291    return NO_EXIT;
2292}
2293
2294static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2295{
2296    DisasCompare c;
2297
2298    disas_jcc(s, &c, get_field(s->fields, m3));
2299
2300    if (c.is_64) {
2301        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2302                            o->in2, o->in1);
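            /* tcg_gen_movcond_i64(c, d, a, b, t, f) gives d = c(a,b) ? t : f,
               so o->in2 (the value to be loaded) is selected only when the
               condition holds; otherwise o->out is given o->in1, the
               original R1 value.  */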
2303        free_compare(&c);
2304    } else {
2305        TCGv_i32 t32 = tcg_temp_new_i32();
2306        TCGv_i64 t, z;
2307
2308        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2309        free_compare(&c);
2310
2311        t = tcg_temp_new_i64();
2312        tcg_gen_extu_i32_i64(t, t32);
2313        tcg_temp_free_i32(t32);
2314
2315        z = tcg_const_i64(0);
2316        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2317        tcg_temp_free_i64(t);
2318        tcg_temp_free_i64(z);
2319    }
2320
2321    return NO_EXIT;
2322}
2323
2324#ifndef CONFIG_USER_ONLY
2325static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2326{
2327    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2328    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2329    check_privileged(s);
2330    potential_page_fault(s);
2331    gen_helper_lctl(cpu_env, r1, o->in2, r3);
2332    tcg_temp_free_i32(r1);
2333    tcg_temp_free_i32(r3);
2334    return NO_EXIT;
2335}
2336
2337static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2338{
2339    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2340    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2341    check_privileged(s);
2342    potential_page_fault(s);
2343    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2344    tcg_temp_free_i32(r1);
2345    tcg_temp_free_i32(r3);
2346    return NO_EXIT;
2347}

2348static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2349{
2350    check_privileged(s);
2351    potential_page_fault(s);
2352    gen_helper_lra(o->out, cpu_env, o->in2);
2353    set_cc_static(s);
2354    return NO_EXIT;
2355}
2356
2357static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2358{
2359    TCGv_i64 t1, t2;
2360
2361    check_privileged(s);
2362
2363    t1 = tcg_temp_new_i64();
2364    t2 = tcg_temp_new_i64();
2365    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2366    tcg_gen_addi_i64(o->in2, o->in2, 4);
2367    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2368    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
2369    tcg_gen_shli_i64(t1, t1, 32);
2370    gen_helper_load_psw(cpu_env, t1, t2);
2371    tcg_temp_free_i64(t1);
2372    tcg_temp_free_i64(t2);
2373    return EXIT_NORETURN;
2374}
2375
2376static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2377{
2378    TCGv_i64 t1, t2;
2379
2380    check_privileged(s);
2381
2382    t1 = tcg_temp_new_i64();
2383    t2 = tcg_temp_new_i64();
2384    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2385    tcg_gen_addi_i64(o->in2, o->in2, 8);
2386    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2387    gen_helper_load_psw(cpu_env, t1, t2);
2388    tcg_temp_free_i64(t1);
2389    tcg_temp_free_i64(t2);
2390    return EXIT_NORETURN;
2391}
2392#endif
2393
2394static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2395{
2396    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2397    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2398    potential_page_fault(s);
2399    gen_helper_lam(cpu_env, r1, o->in2, r3);
2400    tcg_temp_free_i32(r1);
2401    tcg_temp_free_i32(r3);
2402    return NO_EXIT;
2403}
2404
2405static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2406{
2407    int r1 = get_field(s->fields, r1);
2408    int r3 = get_field(s->fields, r3);
2409    TCGv_i64 t = tcg_temp_new_i64();
2410    TCGv_i64 t4 = tcg_const_i64(4);
2411
2412    while (1) {
2413        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2414        store_reg32_i64(r1, t);
2415        if (r1 == r3) {
2416            break;
2417        }
2418        tcg_gen_add_i64(o->in2, o->in2, t4);
2419        r1 = (r1 + 1) & 15;
2420    }
2421
2422    tcg_temp_free_i64(t);
2423    tcg_temp_free_i64(t4);
2424    return NO_EXIT;
2425}
2426
2427static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2428{
2429    int r1 = get_field(s->fields, r1);
2430    int r3 = get_field(s->fields, r3);
2431    TCGv_i64 t = tcg_temp_new_i64();
2432    TCGv_i64 t4 = tcg_const_i64(4);
2433
2434    while (1) {
2435        tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2436        store_reg32h_i64(r1, t);
2437        if (r1 == r3) {
2438            break;
2439        }
2440        tcg_gen_add_i64(o->in2, o->in2, t4);
2441        r1 = (r1 + 1) & 15;
2442    }
2443
2444    tcg_temp_free_i64(t);
2445    tcg_temp_free_i64(t4);
2446    return NO_EXIT;
2447}
2448
2449static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2450{
2451    int r1 = get_field(s->fields, r1);
2452    int r3 = get_field(s->fields, r3);
2453    TCGv_i64 t8 = tcg_const_i64(8);
2454
2455    while (1) {
2456        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2457        if (r1 == r3) {
2458            break;
2459        }
2460        tcg_gen_add_i64(o->in2, o->in2, t8);
2461        r1 = (r1 + 1) & 15;
2462    }
2463
2464    tcg_temp_free_i64(t8);
2465    return NO_EXIT;
2466}
2467
2468static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2469{
2470    o->out = o->in2;
2471    o->g_out = o->g_in2;
2472    TCGV_UNUSED_I64(o->in2);
2473    o->g_in2 = false;
2474    return NO_EXIT;
2475}
2476
2477static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2478{
2479    o->out = o->in1;
2480    o->out2 = o->in2;
2481    o->g_out = o->g_in1;
2482    o->g_out2 = o->g_in2;
2483    TCGV_UNUSED_I64(o->in1);
2484    TCGV_UNUSED_I64(o->in2);
2485    o->g_in1 = o->g_in2 = false;
2486    return NO_EXIT;
2487}
2488
2489static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2490{
2491    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2492    potential_page_fault(s);
2493    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2494    tcg_temp_free_i32(l);
2495    return NO_EXIT;
2496}
2497
2498static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2499{
2500    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2501    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2502    potential_page_fault(s);
2503    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2504    tcg_temp_free_i32(r1);
2505    tcg_temp_free_i32(r2);
2506    set_cc_static(s);
2507    return NO_EXIT;
2508}
2509
2510static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2511{
2512    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2513    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2514    potential_page_fault(s);
2515    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2516    tcg_temp_free_i32(r1);
2517    tcg_temp_free_i32(r3);
2518    set_cc_static(s);
2519    return NO_EXIT;
2520}
2521
2522#ifndef CONFIG_USER_ONLY
2523static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2524{
2525    int r1 = get_field(s->fields, l1);
2526    check_privileged(s);
2527    potential_page_fault(s);
2528    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2529    set_cc_static(s);
2530    return NO_EXIT;
2531}
2532
2533static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2534{
2535    int r1 = get_field(s->fields, l1);
2536    check_privileged(s);
2537    potential_page_fault(s);
2538    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2539    set_cc_static(s);
2540    return NO_EXIT;
2541}
2542#endif
2543
2544static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2545{
2546    potential_page_fault(s);
2547    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
2548    set_cc_static(s);
2549    return NO_EXIT;
2550}
2551
2552static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2553{
2554    potential_page_fault(s);
2555    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2556    set_cc_static(s);
2557    return_low128(o->in2);
2558    return NO_EXIT;
2559}
2560
2561static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2562{
2563    tcg_gen_mul_i64(o->out, o->in1, o->in2);
2564    return NO_EXIT;
2565}
2566
2567static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2568{
2569    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
2570    return NO_EXIT;
2571}
2572
2573static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2574{
2575    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2576    return NO_EXIT;
2577}
2578
2579static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2580{
2581    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2582    return NO_EXIT;
2583}
2584
2585static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2586{
2587    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2588    return NO_EXIT;
2589}
2590
2591static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2592{
2593    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2594    return_low128(o->out2);
2595    return NO_EXIT;
2596}
2597
2598static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2599{
2600    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2601    return_low128(o->out2);
2602    return NO_EXIT;
2603}
2604
2605static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2606{
2607    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2608    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2609    tcg_temp_free_i64(r3);
2610    return NO_EXIT;
2611}
2612
2613static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2614{
2615    int r3 = get_field(s->fields, r3);
2616    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2617    return NO_EXIT;
2618}
2619
2620static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2621{
2622    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2623    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2624    tcg_temp_free_i64(r3);
2625    return NO_EXIT;
2626}
2627
2628static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2629{
2630    int r3 = get_field(s->fields, r3);
2631    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2632    return NO_EXIT;
2633}
2634
2635static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2636{
2637    gen_helper_nabs_i64(o->out, o->in2);
2638    return NO_EXIT;
2639}
2640
2641static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2642{
2643    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2644    return NO_EXIT;
2645}
2646
2647static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2648{
2649    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2650    return NO_EXIT;
2651}
2652
2653static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2654{
2655    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2656    tcg_gen_mov_i64(o->out2, o->in2);
2657    return NO_EXIT;
2658}
2659
2660static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2661{
2662    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2663    potential_page_fault(s);
2664    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2665    tcg_temp_free_i32(l);
2666    set_cc_static(s);
2667    return NO_EXIT;
2668}
2669
2670static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2671{
2672    tcg_gen_neg_i64(o->out, o->in2);
2673    return NO_EXIT;
2674}
2675
2676static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2677{
2678    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2679    return NO_EXIT;
2680}
2681
2682static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2683{
2684    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
2685    return NO_EXIT;
2686}
2687
2688static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2689{
2690    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2691    tcg_gen_mov_i64(o->out2, o->in2);
2692    return NO_EXIT;
2693}
2694
2695static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2696{
2697    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2698    potential_page_fault(s);
2699    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2700    tcg_temp_free_i32(l);
2701    set_cc_static(s);
2702    return NO_EXIT;
2703}
2704
2705static ExitStatus op_or(DisasContext *s, DisasOps *o)
2706{
2707    tcg_gen_or_i64(o->out, o->in1, o->in2);
2708    return NO_EXIT;
2709}
2710
2711static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2712{
2713    int shift = s->insn->data & 0xff;
2714    int size = s->insn->data >> 8;
2715    uint64_t mask = ((1ull << size) - 1) << shift;
2716
2717    assert(!o->g_in2);
2718    tcg_gen_shli_i64(o->in2, o->in2, shift);
2719    tcg_gen_or_i64(o->out, o->in1, o->in2);
2720
2721    /* Produce the CC from only the bits manipulated.  */
2722    tcg_gen_andi_i64(cc_dst, o->out, mask);
2723    set_cc_nz_u64(s, cc_dst);
2724    return NO_EXIT;
2725}
2726
2727static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
2728{
2729    gen_helper_popcnt(o->out, o->in2);
2730    return NO_EXIT;
2731}
2732
2733#ifndef CONFIG_USER_ONLY
2734static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
2735{
2736    check_privileged(s);
2737    gen_helper_ptlb(cpu_env);
2738    return NO_EXIT;
2739}
2740#endif
2741
2742static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
2743{
2744    int i3 = get_field(s->fields, i3);
2745    int i4 = get_field(s->fields, i4);
2746    int i5 = get_field(s->fields, i5);
2747    int do_zero = i4 & 0x80;
2748    uint64_t mask, imask, pmask;
2749    int pos, len, rot;
2750
2751    /* Adjust the arguments for the specific insn.  */
2752    switch (s->fields->op2) {
2753    case 0x55: /* risbg */
2754        i3 &= 63;
2755        i4 &= 63;
2756        pmask = ~0;
2757        break;
2758    case 0x5d: /* risbhg */
2759        i3 &= 31;
2760        i4 &= 31;
2761        pmask = 0xffffffff00000000ull;
2762        break;
2763    case 0x51: /* risblg */
2764        i3 &= 31;
2765        i4 &= 31;
2766        pmask = 0x00000000ffffffffull;
2767        break;
2768    default:
2769        abort();
2770    }
2771
2772    /* MASK is the set of bits to be inserted from R2.
2773       Take care for I3/I4 wraparound.  */
2774    mask = pmask >> i3;
2775    if (i3 <= i4) {
2776        mask ^= pmask >> i4 >> 1;
2777    } else {
2778        mask |= ~(pmask >> i4 >> 1);
2779    }
2780    mask &= pmask;
2781
2782    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
2783       insns, we need to keep the other half of the register.  */
2784    imask = ~mask | ~pmask;
2785    if (do_zero) {
2786        if (s->fields->op2 == 0x55) {
2787            imask = 0;
2788        } else {
2789            imask = ~pmask;
2790        }
2791    }
2792
2793    /* In some cases we can implement this with deposit, which can be more
2794       efficient on some hosts.  */
2795    if (~mask == imask && i3 <= i4) {
2796        if (s->fields->op2 == 0x5d) {
2797            i3 += 32, i4 += 32;
2798        }
2799        /* Note that we rotate the bits to be inserted to the lsb, not to
2800           the position as described in the PoO.  */
2801        len = i4 - i3 + 1;
2802        pos = 63 - i4;
2803        rot = (i5 - pos) & 63;
2804    } else {
2805        pos = len = -1;
2806        rot = i5 & 63;
2807    }
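        /* Worked example (illustrative): RISBG with I3=40, I4=47, I5=16
           selects bits 40-47 (PoO numbering) of R2 rotated left by 16.
           The computation above yields mask = 0x0000000000ff0000,
           imask = ~mask, and the deposit path with len = 8, pos = 16 and
           rot = (16 - 16) & 63 = 0, i.e. the low byte of R2 is deposited
           into host bits 16-23 of R1.  */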
2808
2809    /* Rotate the input as necessary.  */
2810    tcg_gen_rotli_i64(o->in2, o->in2, rot);
2811
2812    /* Insert the selected bits into the output.  */
2813    if (pos >= 0) {
2814        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
2815    } else if (imask == 0) {
2816        tcg_gen_andi_i64(o->out, o->in2, mask);
2817    } else {
2818        tcg_gen_andi_i64(o->in2, o->in2, mask);
2819        tcg_gen_andi_i64(o->out, o->out, imask);
2820        tcg_gen_or_i64(o->out, o->out, o->in2);
2821    }
2822    return NO_EXIT;
2823}
2824
2825static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
2826{
2827    int i3 = get_field(s->fields, i3);
2828    int i4 = get_field(s->fields, i4);
2829    int i5 = get_field(s->fields, i5);
2830    uint64_t mask;
2831
2832    /* If this is a test-only form, arrange to discard the result.  */
2833    if (i3 & 0x80) {
2834        o->out = tcg_temp_new_i64();
2835        o->g_out = false;
2836    }
2837
2838    i3 &= 63;
2839    i4 &= 63;
2840    i5 &= 63;
2841
2842    /* MASK is the set of bits to be operated on from R2.
2843       Take care for I3/I4 wraparound.  */
2844    mask = ~0ull >> i3;
2845    if (i3 <= i4) {
2846        mask ^= ~0ull >> i4 >> 1;
2847    } else {
2848        mask |= ~(~0ull >> i4 >> 1);
2849    }
2850
2851    /* Rotate the input as necessary.  */
2852    tcg_gen_rotli_i64(o->in2, o->in2, i5);
2853
2854    /* Operate.  */
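        /* This handler is shared by RNSBG (0xec54), ROSBG (0xec56) and
           RXSBG (0xec57); the operation is selected by the low opcode byte.  */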
2855    switch (s->fields->op2) {
2856    case 0x54: /* AND */
2857        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
2858        tcg_gen_and_i64(o->out, o->out, o->in2);
2859        break;
2860    case 0x56: /* OR */
2861        tcg_gen_andi_i64(o->in2, o->in2, mask);
2862        tcg_gen_or_i64(o->out, o->out, o->in2);
2863        break;
2864    case 0x57: /* XOR */
2865        tcg_gen_andi_i64(o->in2, o->in2, mask);
2866        tcg_gen_xor_i64(o->out, o->out, o->in2);
2867        break;
2868    default:
2869        abort();
2870    }
2871
2872    /* Set the CC.  */
2873    tcg_gen_andi_i64(cc_dst, o->out, mask);
2874    set_cc_nz_u64(s, cc_dst);
2875    return NO_EXIT;
2876}
2877
2878static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2879{
2880    tcg_gen_bswap16_i64(o->out, o->in2);
2881    return NO_EXIT;
2882}
2883
2884static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2885{
2886    tcg_gen_bswap32_i64(o->out, o->in2);
2887    return NO_EXIT;
2888}
2889
2890static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2891{
2892    tcg_gen_bswap64_i64(o->out, o->in2);
2893    return NO_EXIT;
2894}
2895
2896static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2897{
2898    TCGv_i32 t1 = tcg_temp_new_i32();
2899    TCGv_i32 t2 = tcg_temp_new_i32();
2900    TCGv_i32 to = tcg_temp_new_i32();
2901    tcg_gen_trunc_i64_i32(t1, o->in1);
2902    tcg_gen_trunc_i64_i32(t2, o->in2);
2903    tcg_gen_rotl_i32(to, t1, t2);
2904    tcg_gen_extu_i32_i64(o->out, to);
2905    tcg_temp_free_i32(t1);
2906    tcg_temp_free_i32(t2);
2907    tcg_temp_free_i32(to);
2908    return NO_EXIT;
2909}
2910
2911static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2912{
2913    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
2914    return NO_EXIT;
2915}
2916
2917#ifndef CONFIG_USER_ONLY
2918static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
2919{
2920    check_privileged(s);
2921    gen_helper_rrbe(cc_op, cpu_env, o->in2);
2922    set_cc_static(s);
2923    return NO_EXIT;
2924}
2925
2926static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
2927{
2928    check_privileged(s);
2929    gen_helper_sacf(cpu_env, o->in2);
2930    /* Addressing mode has changed, so end the block.  */
2931    return EXIT_PC_STALE;
2932}
2933#endif
2934
2935static ExitStatus op_sar(DisasContext *s, DisasOps *o)
2936{
2937    int r1 = get_field(s->fields, r1);
2938    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
2939    return NO_EXIT;
2940}
2941
2942static ExitStatus op_seb(DisasContext *s, DisasOps *o)
2943{
2944    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
2945    return NO_EXIT;
2946}
2947
2948static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
2949{
2950    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
2951    return NO_EXIT;
2952}
2953
2954static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
2955{
2956    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2957    return_low128(o->out2);
2958    return NO_EXIT;
2959}
2960
2961static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
2962{
2963    gen_helper_sqeb(o->out, cpu_env, o->in2);
2964    return NO_EXIT;
2965}
2966
2967static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
2968{
2969    gen_helper_sqdb(o->out, cpu_env, o->in2);
2970    return NO_EXIT;
2971}
2972
2973static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
2974{
2975    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
2976    return_low128(o->out2);
2977    return NO_EXIT;
2978}
2979
2980#ifndef CONFIG_USER_ONLY
2981static ExitStatus op_servc(DisasContext *s, DisasOps *o)
2982{
2983    check_privileged(s);
2984    potential_page_fault(s);
2985    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
2986    set_cc_static(s);
2987    return NO_EXIT;
2988}
2989
2990static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
2991{
2992    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2993    check_privileged(s);
2994    potential_page_fault(s);
2995    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
2996    tcg_temp_free_i32(r1);
2997    return NO_EXIT;
2998}
2999#endif
3000
3001static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3002{
3003    DisasCompare c;
3004    TCGv_i64 a;
3005    int lab, r1;
3006
3007    disas_jcc(s, &c, get_field(s->fields, m3));
        /* The store must be performed when the condition is fulfilled,
           so branch around it when it is not.  */
        c.cond = tcg_invert_cond(c.cond);
3008
3009    lab = gen_new_label();
3010    if (c.is_64) {
3011        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3012    } else {
3013        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3014    }
3015    free_compare(&c);
3016
3017    r1 = get_field(s->fields, r1);
3018    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3019    if (s->insn->data) {
3020        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3021    } else {
3022        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3023    }
3024    tcg_temp_free_i64(a);
3025
3026    gen_set_label(lab);
3027    return NO_EXIT;
3028}
3029
3030static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3031{
3032    uint64_t sign = 1ull << s->insn->data;
3033    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3034    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3035    tcg_gen_shl_i64(o->out, o->in1, o->in2);
3036    /* The arithmetic left shift is curious in that it does not affect
3037       the sign bit.  Copy that over from the source unchanged.  */
3038    tcg_gen_andi_i64(o->out, o->out, ~sign);
3039    tcg_gen_andi_i64(o->in1, o->in1, sign);
3040    tcg_gen_or_i64(o->out, o->out, o->in1);
3041    return NO_EXIT;
3042}
3043
3044static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3045{
3046    tcg_gen_shl_i64(o->out, o->in1, o->in2);
3047    return NO_EXIT;
3048}
3049
3050static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3051{
3052    tcg_gen_sar_i64(o->out, o->in1, o->in2);
3053    return NO_EXIT;
3054}
3055
3056static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3057{
3058    tcg_gen_shr_i64(o->out, o->in1, o->in2);
3059    return NO_EXIT;
3060}
3061
3062static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3063{
3064    gen_helper_sfpc(cpu_env, o->in2);
3065    return NO_EXIT;
3066}
3067
3068static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3069{
3070    gen_helper_sfas(cpu_env, o->in2);
3071    return NO_EXIT;
3072}
3073
3074static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3075{
3076    int b2 = get_field(s->fields, b2);
3077    int d2 = get_field(s->fields, d2);
3078    TCGv_i64 t1 = tcg_temp_new_i64();
3079    TCGv_i64 t2 = tcg_temp_new_i64();
3080    int mask, pos, len;
3081
3082    switch (s->fields->op2) {
3083    case 0x99: /* SRNM */
3084        pos = 0, len = 2;
3085        break;
3086    case 0xb8: /* SRNMB */
3087        pos = 0, len = 3;
3088        break;
3089    case 0xb9: /* SRNMT */
3090        pos = 4, len = 3;
3091        break;
3092    default:
3093        tcg_abort();
3094    }
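        /* The (pos, len) pairs select the FPC rounding-mode fields: the BFP
           rounding mode in the low 2 (SRNM) or 3 (SRNMB) bits of the FPC,
           and the DFP rounding mode in FPC bits 25-27, i.e. host bits 4-6,
           for SRNMT (layout assumed from the PoO FPC definition).  */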
3095    mask = (1 << len) - 1;
3096
3097    /* Insert the value into the appropriate field of the FPC.  */
3098    if (b2 == 0) {
3099        tcg_gen_movi_i64(t1, d2 & mask);
3100    } else {
3101        tcg_gen_addi_i64(t1, regs[b2], d2);
3102        tcg_gen_andi_i64(t1, t1, mask);
3103    }
3104    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3105    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3106    tcg_temp_free_i64(t1);
3107
3108    /* Then install the new FPC to set the rounding mode in fpu_status.  */
3109    gen_helper_sfpc(cpu_env, t2);
3110    tcg_temp_free_i64(t2);
3111    return NO_EXIT;
3112}
3113
3114#ifndef CONFIG_USER_ONLY
3115static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3116{
3117    check_privileged(s);
3118    tcg_gen_shri_i64(o->in2, o->in2, 4);
3119    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
3120    return NO_EXIT;
3121}
3122
3123static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3124{
3125    check_privileged(s);
3126    gen_helper_sske(cpu_env, o->in1, o->in2);
3127    return NO_EXIT;
3128}
3129
3130static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3131{
3132    check_privileged(s);
3133    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3134    return NO_EXIT;
3135}
3136
3137static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3138{
3139    check_privileged(s);
3140    /* ??? Surely cpu address != cpu number.  In any case the previous
3141       version of this stored more than the required half-word, so it
3142       is unlikely this has ever been tested.  */
3143    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3144    return NO_EXIT;
3145}
3146
3147static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3148{
3149    gen_helper_stck(o->out, cpu_env);
3150    /* ??? We don't implement clock states.  */
3151    gen_op_movi_cc(s, 0);
3152    return NO_EXIT;
3153}
3154
3155static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3156{
3157    TCGv_i64 c1 = tcg_temp_new_i64();
3158    TCGv_i64 c2 = tcg_temp_new_i64();
3159    gen_helper_stck(c1, cpu_env);
3160    /* Shift the 64-bit value into its place as a zero-extended
3161       104-bit value.  Note that "bit positions 64-103 are always
3162       non-zero so that they compare differently to STCK"; we set
3163       the least significant bit to 1.  */
3164    tcg_gen_shli_i64(c2, c1, 56);
3165    tcg_gen_shri_i64(c1, c1, 8);
3166    tcg_gen_ori_i64(c2, c2, 0x10000);
3167    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3168    tcg_gen_addi_i64(o->in2, o->in2, 8);
3169    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3170    tcg_temp_free_i64(c1);
3171    tcg_temp_free_i64(c2);
3172    /* ??? We don't implement clock states.  */
3173    gen_op_movi_cc(s, 0);
3174    return NO_EXIT;
3175}
3176
3177static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3178{
3179    check_privileged(s);
3180    gen_helper_sckc(cpu_env, o->in2);
3181    return NO_EXIT;
3182}
3183
3184static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3185{
3186    check_privileged(s);
3187    gen_helper_stckc(o->out, cpu_env);
3188    return NO_EXIT;
3189}
3190
3191static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3192{
3193    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3194    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3195    check_privileged(s);
3196    potential_page_fault(s);
3197    gen_helper_stctg(cpu_env, r1, o->in2, r3);
3198    tcg_temp_free_i32(r1);
3199    tcg_temp_free_i32(r3);
3200    return NO_EXIT;
3201}
3202
3203static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3204{
3205    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3206    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3207    check_privileged(s);
3208    potential_page_fault(s);
3209    gen_helper_stctl(cpu_env, r1, o->in2, r3);
3210    tcg_temp_free_i32(r1);
3211    tcg_temp_free_i32(r3);
3212    return NO_EXIT;
3213}
3214
3215static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3216{
3217    check_privileged(s);
3218    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3219    return NO_EXIT;
3220}
3221
3222static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3223{
3224    check_privileged(s);
3225    gen_helper_spt(cpu_env, o->in2);
3226    return NO_EXIT;
3227}
3228
3229static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3230{
3231    TCGv_i64 f, a;
3232    /* We really ought to have more complete indication of facilities
3233       that we implement.  Address this when STFLE is implemented.  */
3234    check_privileged(s);
3235    f = tcg_const_i64(0xc0000000);
3236    a = tcg_const_i64(200);
3237    tcg_gen_qemu_st32(f, a, get_mem_index(s));
3238    tcg_temp_free_i64(f);
3239    tcg_temp_free_i64(a);
3240    return NO_EXIT;
3241}
3242
3243static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3244{
3245    check_privileged(s);
3246    gen_helper_stpt(o->out, cpu_env);
3247    return NO_EXIT;
3248}
3249
3250static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3251{
3252    check_privileged(s);
3253    potential_page_fault(s);
3254    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3255    set_cc_static(s);
3256    return NO_EXIT;
3257}
3258
3259static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3260{
3261    check_privileged(s);
3262    gen_helper_spx(cpu_env, o->in2);
3263    return NO_EXIT;
3264}
3265
3266static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
3267{
3268    check_privileged(s);
3269    /* Not operational.  */
3270    gen_op_movi_cc(s, 3);
3271    return NO_EXIT;
3272}
3273
3274static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3275{
3276    check_privileged(s);
3277    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3278    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3279    return NO_EXIT;
3280}
3281
3282static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3283{
3284    uint64_t i2 = get_field(s->fields, i2);
3285    TCGv_i64 t;
3286
3287    check_privileged(s);
3288
3289    /* It is important to do what the instruction name says: STORE THEN.
3290       If we let the output hook perform the store then if we fault and
3291       restart, we'll have the wrong SYSTEM MASK in place.  */
3292    t = tcg_temp_new_i64();
3293    tcg_gen_shri_i64(t, psw_mask, 56);
3294    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3295    tcg_temp_free_i64(t);
3296
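        /* Opcode 0xac is STNSM (AND the system mask with I2); anything else
           reaching here should be 0xad, STOSM (OR the system mask).  */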
3297    if (s->fields->op == 0xac) {
3298        tcg_gen_andi_i64(psw_mask, psw_mask,
3299                         (i2 << 56) | 0x00ffffffffffffffull);
3300    } else {
3301        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3302    }
3303    return NO_EXIT;
3304}
3305
3306static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3307{
3308    check_privileged(s);
3309    potential_page_fault(s);
3310    gen_helper_stura(cpu_env, o->in2, o->in1);
3311    return NO_EXIT;
3312}
3313#endif
3314
3315static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3316{
3317    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3318    return NO_EXIT;
3319}
3320
3321static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3322{
3323    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3324    return NO_EXIT;
3325}
3326
3327static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3328{
3329    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3330    return NO_EXIT;
3331}
3332
3333static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3334{
3335    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3336    return NO_EXIT;
3337}
3338
3339static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3340{
3341    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3342    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3343    potential_page_fault(s);
3344    gen_helper_stam(cpu_env, r1, o->in2, r3);
3345    tcg_temp_free_i32(r1);
3346    tcg_temp_free_i32(r3);
3347    return NO_EXIT;
3348}
3349
3350static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3351{
3352    int m3 = get_field(s->fields, m3);
3353    int pos, base = s->insn->data;
3354    TCGv_i64 tmp = tcg_temp_new_i64();
3355
3356    pos = base + ctz32(m3) * 8;
3357    switch (m3) {
3358    case 0xf:
3359        /* Effectively a 32-bit store.  */
3360        tcg_gen_shri_i64(tmp, o->in1, pos);
3361        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3362        break;
3363
3364    case 0xc:
3365    case 0x6:
3366    case 0x3:
3367        /* Effectively a 16-bit store.  */
3368        tcg_gen_shri_i64(tmp, o->in1, pos);
3369        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3370        break;
3371
3372    case 0x8:
3373    case 0x4:
3374    case 0x2:
3375    case 0x1:
3376        /* Effectively an 8-bit store.  */
3377        tcg_gen_shri_i64(tmp, o->in1, pos);
3378        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3379        break;
3380
3381    default:
3382        /* This is going to be a sequence of shifts and stores.  */
3383        pos = base + 32 - 8;
3384        while (m3) {
3385            if (m3 & 0x8) {
3386                tcg_gen_shri_i64(tmp, o->in1, pos);
3387                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3388                tcg_gen_addi_i64(o->in2, o->in2, 1);
3389            }
3390            m3 = (m3 << 1) & 0xf;
3391            pos -= 8;
3392        }
3393        break;
3394    }
3395    tcg_temp_free_i64(tmp);
3396    return NO_EXIT;
3397}
3398
3399static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3400{
3401    int r1 = get_field(s->fields, r1);
3402    int r3 = get_field(s->fields, r3);
3403    int size = s->insn->data;
3404    TCGv_i64 tsize = tcg_const_i64(size);
3405
3406    while (1) {
3407        if (size == 8) {
3408            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3409        } else {
3410            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3411        }
3412        if (r1 == r3) {
3413            break;
3414        }
3415        tcg_gen_add_i64(o->in2, o->in2, tsize);
3416        r1 = (r1 + 1) & 15;
3417    }
3418
3419    tcg_temp_free_i64(tsize);
3420    return NO_EXIT;
3421}
3422
3423static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3424{
3425    int r1 = get_field(s->fields, r1);
3426    int r3 = get_field(s->fields, r3);
3427    TCGv_i64 t = tcg_temp_new_i64();
3428    TCGv_i64 t4 = tcg_const_i64(4);
3429    TCGv_i64 t32 = tcg_const_i64(32);
3430
3431    while (1) {
3432        tcg_gen_shr_i64(t, regs[r1], t32);   /* store the high halves */
3433        tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3434        if (r1 == r3) {
3435            break;
3436        }
3437        tcg_gen_add_i64(o->in2, o->in2, t4);
3438        r1 = (r1 + 1) & 15;
3439    }
3440
3441    tcg_temp_free_i64(t);
3442    tcg_temp_free_i64(t4);
3443    tcg_temp_free_i64(t32);
3444    return NO_EXIT;
3445}
3446
3447static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3448{
3449    potential_page_fault(s);
3450    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3451    set_cc_static(s);
3452    return_low128(o->in2);
3453    return NO_EXIT;
3454}
3455
3456static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3457{
3458    tcg_gen_sub_i64(o->out, o->in1, o->in2);
3459    return NO_EXIT;
3460}
3461
3462static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3463{
3464    DisasCompare cmp;
3465    TCGv_i64 borrow;
3466
3467    tcg_gen_sub_i64(o->out, o->in1, o->in2);
3468
3469    /* The !borrow flag is the msb of CC.  Since we want the inverse of
3470       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
3471    disas_jcc(s, &cmp, 8 | 4);
3472    borrow = tcg_temp_new_i64();
3473    if (cmp.is_64) {
3474        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
3475    } else {
3476        TCGv_i32 t = tcg_temp_new_i32();
3477        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
3478        tcg_gen_extu_i32_i64(borrow, t);
3479        tcg_temp_free_i32(t);
3480    }
3481    free_compare(&cmp);
3482
3483    tcg_gen_sub_i64(o->out, o->out, borrow);
3484    tcg_temp_free_i64(borrow);
3485    return NO_EXIT;
3486}
3487
3488static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3489{
3490    TCGv_i32 t;
3491
3492    update_psw_addr(s);
3493    update_cc_op(s);
3494
3495    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3496    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3497    tcg_temp_free_i32(t);
3498
3499    t = tcg_const_i32(s->next_pc - s->pc);
3500    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3501    tcg_temp_free_i32(t);
3502
3503    gen_exception(EXCP_SVC);
3504    return EXIT_NORETURN;
3505}
3506
3507static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3508{
3509    gen_helper_tceb(cc_op, o->in1, o->in2);
3510    set_cc_static(s);
3511    return NO_EXIT;
3512}
3513
3514static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3515{
3516    gen_helper_tcdb(cc_op, o->in1, o->in2);
3517    set_cc_static(s);
3518    return NO_EXIT;
3519}
3520
3521static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3522{
3523    gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3524    set_cc_static(s);
3525    return NO_EXIT;
3526}
3527
3528#ifndef CONFIG_USER_ONLY
3529static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3530{
3531    potential_page_fault(s);
3532    gen_helper_tprot(cc_op, o->addr1, o->in2);
3533    set_cc_static(s);
3534    return NO_EXIT;
3535}
3536#endif
3537
3538static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3539{
3540    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3541    potential_page_fault(s);
3542    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3543    tcg_temp_free_i32(l);
3544    set_cc_static(s);
3545    return NO_EXIT;
3546}
3547
3548static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3549{
3550    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3551    potential_page_fault(s);
3552    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3553    tcg_temp_free_i32(l);
3554    return NO_EXIT;
3555}
3556
3557static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3558{
3559    int d1 = get_field(s->fields, d1);
3560    int d2 = get_field(s->fields, d2);
3561    int b1 = get_field(s->fields, b1);
3562    int b2 = get_field(s->fields, b2);
3563    int l = get_field(s->fields, l1);
3564    TCGv_i32 t32;
3565
3566    o->addr1 = get_address(s, 0, b1, d1);
3567
3568    /* If the addresses are identical, this is a store/memset of zero.  */
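        /* Illustrative example of the decomposition below: L=6, i.e. a
           7-byte operand, with identical addresses becomes one 4-byte, one
           2-byte and one 1-byte store of zero, advancing the address in
           between.  */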
3569    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
3570        o->in2 = tcg_const_i64(0);
3571
3572        l++;
3573        while (l >= 8) {
3574            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
3575            l -= 8;
3576            if (l > 0) {
3577                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
3578            }
3579        }
3580        if (l >= 4) {
3581            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
3582            l -= 4;
3583            if (l > 0) {
3584                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
3585            }
3586        }
3587        if (l >= 2) {
3588            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
3589            l -= 2;
3590            if (l > 0) {
3591                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
3592            }
3593        }
3594        if (l) {
3595            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
3596        }
3597        gen_op_movi_cc(s, 0);
3598        return NO_EXIT;
3599    }
3600
3601    /* But in general we'll defer to a helper.  */
3602    o->in2 = get_address(s, 0, b2, d2);
3603    t32 = tcg_const_i32(l);
3604    potential_page_fault(s);
3605    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
3606    tcg_temp_free_i32(t32);
3607    set_cc_static(s);
3608    return NO_EXIT;
3609}
3610
3611static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3612{
3613    tcg_gen_xor_i64(o->out, o->in1, o->in2);
3614    return NO_EXIT;
3615}
3616
3617static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3618{
3619    int shift = s->insn->data & 0xff;
3620    int size = s->insn->data >> 8;
3621    uint64_t mask = ((1ull << size) - 1) << shift;
3622
3623    assert(!o->g_in2);
3624    tcg_gen_shli_i64(o->in2, o->in2, shift);
3625    tcg_gen_xor_i64(o->out, o->in1, o->in2);
3626
3627    /* Produce the CC from only the bits manipulated.  */
3628    tcg_gen_andi_i64(cc_dst, o->out, mask);
3629    set_cc_nz_u64(s, cc_dst);
3630    return NO_EXIT;
3631}
3632
3633static ExitStatus op_zero(DisasContext *s, DisasOps *o)
3634{
3635    o->out = tcg_const_i64(0);
3636    return NO_EXIT;
3637}
3638
3639static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
3640{
3641    o->out = tcg_const_i64(0);
3642    o->out2 = o->out;
3643    o->g_out2 = true;
3644    return NO_EXIT;
3645}
3646
3647/* ====================================================================== */
3648/* The "Cc OUTput" generators.  Given the generated output (and in some cases
3649   the original inputs), update the various cc data structures in order to
3650   be able to compute the new condition code.  */
3651
3652static void cout_abs32(DisasContext *s, DisasOps *o)
3653{
3654    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
3655}
3656
3657static void cout_abs64(DisasContext *s, DisasOps *o)
3658{
3659    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
3660}
3661
3662static void cout_adds32(DisasContext *s, DisasOps *o)
3663{
3664    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
3665}
3666
3667static void cout_adds64(DisasContext *s, DisasOps *o)
3668{
3669    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
3670}
3671
3672static void cout_addu32(DisasContext *s, DisasOps *o)
3673{
3674    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
3675}
3676
3677static void cout_addu64(DisasContext *s, DisasOps *o)
3678{
3679    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
3680}
3681
3682static void cout_addc32(DisasContext *s, DisasOps *o)
3683{
3684    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
3685}
3686
3687static void cout_addc64(DisasContext *s, DisasOps *o)
3688{
3689    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
3690}
3691
3692static void cout_cmps32(DisasContext *s, DisasOps *o)
3693{
3694    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
3695}
3696
3697static void cout_cmps64(DisasContext *s, DisasOps *o)
3698{
3699    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
3700}
3701
3702static void cout_cmpu32(DisasContext *s, DisasOps *o)
3703{
3704    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
3705}
3706
3707static void cout_cmpu64(DisasContext *s, DisasOps *o)
3708{
3709    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
3710}
3711
3712static void cout_f32(DisasContext *s, DisasOps *o)
3713{
3714    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
3715}
3716
3717static void cout_f64(DisasContext *s, DisasOps *o)
3718{
3719    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
3720}
3721
3722static void cout_f128(DisasContext *s, DisasOps *o)
3723{
3724    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
3725}
3726
3727static void cout_nabs32(DisasContext *s, DisasOps *o)
3728{
3729    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
3730}
3731
3732static void cout_nabs64(DisasContext *s, DisasOps *o)
3733{
3734    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
3735}
3736
3737static void cout_neg32(DisasContext *s, DisasOps *o)
3738{
3739    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
3740}
3741
3742static void cout_neg64(DisasContext *s, DisasOps *o)
3743{
3744    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
3745}
3746
3747static void cout_nz32(DisasContext *s, DisasOps *o)
3748{
3749    tcg_gen_ext32u_i64(cc_dst, o->out);
3750    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
3751}
3752
3753static void cout_nz64(DisasContext *s, DisasOps *o)
3754{
3755    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
3756}
3757
3758static void cout_s32(DisasContext *s, DisasOps *o)
3759{
3760    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3761}
3762
3763static void cout_s64(DisasContext *s, DisasOps *o)
3764{
3765    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
3766}
3767
3768static void cout_subs32(DisasContext *s, DisasOps *o)
3769{
3770    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3771}
3772
3773static void cout_subs64(DisasContext *s, DisasOps *o)
3774{
3775    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3776}
3777
3778static void cout_subu32(DisasContext *s, DisasOps *o)
3779{
3780    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3781}
3782
3783static void cout_subu64(DisasContext *s, DisasOps *o)
3784{
3785    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3786}
3787
3788static void cout_subb32(DisasContext *s, DisasOps *o)
3789{
3790    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3791}
3792
3793static void cout_subb64(DisasContext *s, DisasOps *o)
3794{
3795    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3796}
3797
3798static void cout_tm32(DisasContext *s, DisasOps *o)
3799{
3800    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3801}
3802
3803static void cout_tm64(DisasContext *s, DisasOps *o)
3804{
3805    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3806}
3807
3808/* ====================================================================== */
3809/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
3810   with the TCG register to which we will write.  Used in combination with
3811   the "wout" generators, in some cases we need a new temporary, and in
3812   some cases we can write to a TCG global.  */
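/* As a rule of thumb: prep_new below allocates a scratch temporary that
   translate_one frees afterwards, while prep_r1 points o->out directly at
   the TCG global for r1 and sets g_out so that the common cleanup code
   knows not to free it.  */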
3813
3814static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3815{
3816    o->out = tcg_temp_new_i64();
3817}
3818#define SPEC_prep_new 0
3819
3820static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3821{
3822    o->out = tcg_temp_new_i64();
3823    o->out2 = tcg_temp_new_i64();
3824}
3825#define SPEC_prep_new_P 0
3826
3827static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3828{
3829    o->out = regs[get_field(f, r1)];
3830    o->g_out = true;
3831}
3832#define SPEC_prep_r1 0
3833
3834static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3835{
3836    int r1 = get_field(f, r1);
3837    o->out = regs[r1];
3838    o->out2 = regs[r1 + 1];
3839    o->g_out = o->g_out2 = true;
3840}
3841#define SPEC_prep_r1_P SPEC_r1_even
3842
3843static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3844{
3845    o->out = fregs[get_field(f, r1)];
3846    o->g_out = true;
3847}
3848#define SPEC_prep_f1 0
3849
3850static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3851{
3852    int r1 = get_field(f, r1);
3853    o->out = fregs[r1];
3854    o->out2 = fregs[r1 + 2];
3855    o->g_out = o->g_out2 = true;
3856}
3857#define SPEC_prep_x1 SPEC_r1_f128
3858
3859/* ====================================================================== */
3860/* The "Write OUTput" generators.  These generally perform some non-trivial
3861   copy of data to TCG globals, or to main memory.  The trivial cases are
3862   generally handled by having a "prep" generator install the TCG global
3863   as the destination of the operation.  */
3864
3865static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3866{
3867    store_reg(get_field(f, r1), o->out);
3868}
3869#define SPEC_wout_r1 0
3870
3871static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3872{
3873    int r1 = get_field(f, r1);
3874    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
3875}
3876#define SPEC_wout_r1_8 0
3877
3878static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3879{
3880    int r1 = get_field(f, r1);
3881    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
3882}
3883#define SPEC_wout_r1_16 0
3884
3885static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3886{
3887    store_reg32_i64(get_field(f, r1), o->out);
3888}
3889#define SPEC_wout_r1_32 0
3890
3891static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3892{
3893    int r1 = get_field(f, r1);
3894    store_reg32_i64(r1, o->out);
3895    store_reg32_i64(r1 + 1, o->out2);
3896}
3897#define SPEC_wout_r1_P32 SPEC_r1_even
3898
3899static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3900{
3901    int r1 = get_field(f, r1);
3902    store_reg32_i64(r1 + 1, o->out);
3903    tcg_gen_shri_i64(o->out, o->out, 32);
3904    store_reg32_i64(r1, o->out);
3905}
3906#define SPEC_wout_r1_D32 SPEC_r1_even
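/* wout_r1_D32 above splits a 64-bit result across an even/odd register
   pair: the low 32 bits land in r1+1 first, then the value is shifted
   right and the high 32 bits are written to r1 (hence SPEC_r1_even).  */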
3907
3908static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3909{
3910    store_freg32_i64(get_field(f, r1), o->out);
3911}
3912#define SPEC_wout_e1 0
3913
3914static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3915{
3916    store_freg(get_field(f, r1), o->out);
3917}
3918#define SPEC_wout_f1 0
3919
3920static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3921{
3922    int f1 = get_field(f, r1);
3923    store_freg(f1, o->out);
3924    store_freg(f1 + 2, o->out2);
3925}
3926#define SPEC_wout_x1 SPEC_r1_f128
3927
3928static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3929{
3930    if (get_field(f, r1) != get_field(f, r2)) {
3931        store_reg32_i64(get_field(f, r1), o->out);
3932    }
3933}
3934#define SPEC_wout_cond_r1r2_32 0
3935
3936static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
3937{
3938    if (get_field(f, r1) != get_field(f, r2)) {
3939        store_freg32_i64(get_field(f, r1), o->out);
3940    }
3941}
3942#define SPEC_wout_cond_e1e2 0
3943
3944static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3945{
3946    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
3947}
3948#define SPEC_wout_m1_8 0
3949
3950static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3951{
3952    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
3953}
3954#define SPEC_wout_m1_16 0
3955
3956static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3957{
3958    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
3959}
3960#define SPEC_wout_m1_32 0
3961
3962static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3963{
3964    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
3965}
3966#define SPEC_wout_m1_64 0
3967
3968static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3969{
3970    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
3971}
3972#define SPEC_wout_m2_32 0
3973
3974/* ====================================================================== */
3975/* The "INput 1" generators.  These load the first operand to an insn.  */
3976
3977static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3978{
3979    o->in1 = load_reg(get_field(f, r1));
3980}
3981#define SPEC_in1_r1 0
3982
3983static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3984{
3985    o->in1 = regs[get_field(f, r1)];
3986    o->g_in1 = true;
3987}
3988#define SPEC_in1_r1_o 0
3989
3990static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3991{
3992    o->in1 = tcg_temp_new_i64();
3993    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
3994}
3995#define SPEC_in1_r1_32s 0
3996
3997static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3998{
3999    o->in1 = tcg_temp_new_i64();
4000    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4001}
4002#define SPEC_in1_r1_32u 0
4003
4004static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4005{
4006    o->in1 = tcg_temp_new_i64();
4007    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4008}
4009#define SPEC_in1_r1_sr32 0
4010
4011static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4012{
4013    o->in1 = load_reg(get_field(f, r1) + 1);
4014}
4015#define SPEC_in1_r1p1 SPEC_r1_even
4016
4017static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4018{
4019    o->in1 = tcg_temp_new_i64();
4020    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4021}
4022#define SPEC_in1_r1p1_32s SPEC_r1_even
4023
4024static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4025{
4026    o->in1 = tcg_temp_new_i64();
4027    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4028}
4029#define SPEC_in1_r1p1_32u SPEC_r1_even
4030
4031static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4032{
4033    int r1 = get_field(f, r1);
4034    o->in1 = tcg_temp_new_i64();
4035    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4036}
4037#define SPEC_in1_r1_D32 SPEC_r1_even
4038
4039static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4040{
4041    o->in1 = load_reg(get_field(f, r2));
4042}
4043#define SPEC_in1_r2 0
4044
4045static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4046{
4047    o->in1 = load_reg(get_field(f, r3));
4048}
4049#define SPEC_in1_r3 0
4050
4051static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4052{
4053    o->in1 = regs[get_field(f, r3)];
4054    o->g_in1 = true;
4055}
4056#define SPEC_in1_r3_o 0
4057
4058static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4059{
4060    o->in1 = tcg_temp_new_i64();
4061    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4062}
4063#define SPEC_in1_r3_32s 0
4064
4065static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4066{
4067    o->in1 = tcg_temp_new_i64();
4068    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4069}
4070#define SPEC_in1_r3_32u 0
4071
4072static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4073{
4074    int r3 = get_field(f, r3);
4075    o->in1 = tcg_temp_new_i64();
4076    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4077}
4078#define SPEC_in1_r3_D32 SPEC_r3_even
4079
4080static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4081{
4082    o->in1 = load_freg32_i64(get_field(f, r1));
4083}
4084#define SPEC_in1_e1 0
4085
4086static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4087{
4088    o->in1 = fregs[get_field(f, r1)];
4089    o->g_in1 = true;
4090}
4091#define SPEC_in1_f1_o 0
4092
4093static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4094{
4095    int r1 = get_field(f, r1);
4096    o->out = fregs[r1];
4097    o->out2 = fregs[r1 + 2];
4098    o->g_out = o->g_out2 = true;
4099}
4100#define SPEC_in1_x1_o SPEC_r1_f128
4101
4102static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4103{
4104    o->in1 = fregs[get_field(f, r3)];
4105    o->g_in1 = true;
4106}
4107#define SPEC_in1_f3_o 0
4108
4109static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4110{
4111    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4112}
4113#define SPEC_in1_la1 0
4114
4115static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4116{
4117    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4118    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4119}
4120#define SPEC_in1_la2 0
4121
4122static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4123{
4124    in1_la1(s, f, o);
4125    o->in1 = tcg_temp_new_i64();
4126    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4127}
4128#define SPEC_in1_m1_8u 0
4129
4130static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4131{
4132    in1_la1(s, f, o);
4133    o->in1 = tcg_temp_new_i64();
4134    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4135}
4136#define SPEC_in1_m1_16s 0
4137
4138static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4139{
4140    in1_la1(s, f, o);
4141    o->in1 = tcg_temp_new_i64();
4142    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4143}
4144#define SPEC_in1_m1_16u 0
4145
4146static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4147{
4148    in1_la1(s, f, o);
4149    o->in1 = tcg_temp_new_i64();
4150    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4151}
4152#define SPEC_in1_m1_32s 0
4153
4154static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4155{
4156    in1_la1(s, f, o);
4157    o->in1 = tcg_temp_new_i64();
4158    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4159}
4160#define SPEC_in1_m1_32u 0
4161
4162static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4163{
4164    in1_la1(s, f, o);
4165    o->in1 = tcg_temp_new_i64();
4166    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4167}
4168#define SPEC_in1_m1_64 0
4169
4170/* ====================================================================== */
4171/* The "INput 2" generators.  These load the second operand to an insn.  */
4172
4173static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4174{
4175    o->in2 = regs[get_field(f, r1)];
4176    o->g_in2 = true;
4177}
4178#define SPEC_in2_r1_o 0
4179
4180static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4181{
4182    o->in2 = tcg_temp_new_i64();
4183    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4184}
4185#define SPEC_in2_r1_16u 0
4186
4187static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4188{
4189    o->in2 = tcg_temp_new_i64();
4190    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4191}
4192#define SPEC_in2_r1_32u 0
4193
4194static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4195{
4196    int r1 = get_field(f, r1);
4197    o->in2 = tcg_temp_new_i64();
4198    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4199}
4200#define SPEC_in2_r1_D32 SPEC_r1_even
4201
4202static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4203{
4204    o->in2 = load_reg(get_field(f, r2));
4205}
4206#define SPEC_in2_r2 0
4207
4208static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4209{
4210    o->in2 = regs[get_field(f, r2)];
4211    o->g_in2 = true;
4212}
4213#define SPEC_in2_r2_o 0
4214
4215static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4216{
4217    int r2 = get_field(f, r2);
4218    if (r2 != 0) {
4219        o->in2 = load_reg(r2);
4220    }
4221}
4222#define SPEC_in2_r2_nz 0
4223
4224static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4225{
4226    o->in2 = tcg_temp_new_i64();
4227    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4228}
4229#define SPEC_in2_r2_8s 0
4230
4231static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4232{
4233    o->in2 = tcg_temp_new_i64();
4234    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4235}
4236#define SPEC_in2_r2_8u 0
4237
4238static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4239{
4240    o->in2 = tcg_temp_new_i64();
4241    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4242}
4243#define SPEC_in2_r2_16s 0
4244
4245static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4246{
4247    o->in2 = tcg_temp_new_i64();
4248    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4249}
4250#define SPEC_in2_r2_16u 0
4251
4252static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4253{
4254    o->in2 = load_reg(get_field(f, r3));
4255}
4256#define SPEC_in2_r3 0
4257
4258static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4259{
4260    o->in2 = tcg_temp_new_i64();
4261    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4262}
4263#define SPEC_in2_r2_32s 0
4264
4265static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4266{
4267    o->in2 = tcg_temp_new_i64();
4268    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4269}
4270#define SPEC_in2_r2_32u 0
4271
4272static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4273{
4274    o->in2 = load_freg32_i64(get_field(f, r2));
4275}
4276#define SPEC_in2_e2 0
4277
4278static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4279{
4280    o->in2 = fregs[get_field(f, r2)];
4281    o->g_in2 = true;
4282}
4283#define SPEC_in2_f2_o 0
4284
4285static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4286{
4287    int r2 = get_field(f, r2);
4288    o->in1 = fregs[r2];
4289    o->in2 = fregs[r2 + 2];
4290    o->g_in1 = o->g_in2 = true;
4291}
4292#define SPEC_in2_x2_o SPEC_r2_f128
4293
4294static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4295{
4296    o->in2 = get_address(s, 0, get_field(f, r2), 0);
4297}
4298#define SPEC_in2_ra2 0
4299
4300static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4301{
4302    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4303    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4304}
4305#define SPEC_in2_a2 0
4306
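/* Relative-immediate (RI/RIL) operands count halfwords, which is why the
   signed i2 field is scaled by 2 before being added to the current PC.  */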
4307static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4308{
4309    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4310}
4311#define SPEC_in2_ri2 0
4312
4313static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4314{
4315    help_l2_shift(s, f, o, 31);
4316}
4317#define SPEC_in2_sh32 0
4318
4319static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4320{
4321    help_l2_shift(s, f, o, 63);
4322}
4323#define SPEC_in2_sh64 0
4324
4325static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4326{
4327    in2_a2(s, f, o);
4328    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4329}
4330#define SPEC_in2_m2_8u 0
4331
4332static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4333{
4334    in2_a2(s, f, o);
4335    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4336}
4337#define SPEC_in2_m2_16s 0
4338
4339static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4340{
4341    in2_a2(s, f, o);
4342    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4343}
4344#define SPEC_in2_m2_16u 0
4345
4346static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4347{
4348    in2_a2(s, f, o);
4349    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4350}
4351#define SPEC_in2_m2_32s 0
4352
4353static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4354{
4355    in2_a2(s, f, o);
4356    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4357}
4358#define SPEC_in2_m2_32u 0
4359
4360static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4361{
4362    in2_a2(s, f, o);
4363    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4364}
4365#define SPEC_in2_m2_64 0
4366
4367static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4368{
4369    in2_ri2(s, f, o);
4370    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4371}
4372#define SPEC_in2_mri2_16u 0
4373
4374static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4375{
4376    in2_ri2(s, f, o);
4377    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4378}
4379#define SPEC_in2_mri2_32s 0
4380
4381static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4382{
4383    in2_ri2(s, f, o);
4384    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4385}
4386#define SPEC_in2_mri2_32u 0
4387
4388static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4389{
4390    in2_ri2(s, f, o);
4391    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4392}
4393#define SPEC_in2_mri2_64 0
4394
4395static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4396{
4397    o->in2 = tcg_const_i64(get_field(f, i2));
4398}
4399#define SPEC_in2_i2 0
4400
4401static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4402{
4403    o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4404}
4405#define SPEC_in2_i2_8u 0
4406
4407static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4408{
4409    o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4410}
4411#define SPEC_in2_i2_16u 0
4412
4413static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4414{
4415    o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4416}
4417#define SPEC_in2_i2_32u 0
4418
4419static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4420{
4421    uint64_t i2 = (uint16_t)get_field(f, i2);
4422    o->in2 = tcg_const_i64(i2 << s->insn->data);
4423}
4424#define SPEC_in2_i2_16u_shl 0
4425
4426static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4427{
4428    uint64_t i2 = (uint32_t)get_field(f, i2);
4429    o->in2 = tcg_const_i64(i2 << s->insn->data);
4430}
4431#define SPEC_in2_i2_32u_shl 0
4432
4433/* ====================================================================== */
4434
4435/* Find opc within the table of insns.  This is formulated as a switch
4436   statement so that (1) we get compile-time notice of cut-paste errors
4437   for duplicated opcodes, and (2) the compiler generates the binary
4438   search tree, rather than us having to post-process the table.  */
4439
4440#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4441    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4442
4443#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4444
4445enum DisasInsnEnum {
4446#include "insn-data.def"
4447};
4448
4449#undef D
4450#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
4451    .opc = OPC,                                                             \
4452    .fmt = FMT_##FT,                                                        \
4453    .fac = FAC_##FC,                                                        \
4454    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
4455    .name = #NM,                                                            \
4456    .help_in1 = in1_##I1,                                                   \
4457    .help_in2 = in2_##I2,                                                   \
4458    .help_prep = prep_##P,                                                  \
4459    .help_wout = wout_##W,                                                  \
4460    .help_cout = cout_##CC,                                                 \
4461    .help_op = op_##OP,                                                     \
4462    .data = D                                                               \
4463 },
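/* To illustrate the machinery: an add-register entry that selects the
   helpers (I1, I2, P, W, OP, CC) = (r1, r2, new, r1_32, add, adds32)
   expands to an initializer with .help_in1 = in1_r1, .help_in2 = in2_r2,
   .help_prep = prep_new, .help_wout = wout_r1_32, .help_op = op_add,
   .help_cout = cout_adds32, and a .spec mask OR-ed together from the
   corresponding SPEC_* values (see insn-data.def for the actual table).  */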
4464
4465/* Allow 0 to be used for NULL in the table below.  */
4466#define in1_0  NULL
4467#define in2_0  NULL
4468#define prep_0  NULL
4469#define wout_0  NULL
4470#define cout_0  NULL
4471#define op_0  NULL
4472
4473#define SPEC_in1_0 0
4474#define SPEC_in2_0 0
4475#define SPEC_prep_0 0
4476#define SPEC_wout_0 0
4477
4478static const DisasInsn insn_info[] = {
4479#include "insn-data.def"
4480};
4481
4482#undef D
4483#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4484    case OPC: return &insn_info[insn_ ## NM];
4485
4486static const DisasInsn *lookup_opc(uint16_t opc)
4487{
4488    switch (opc) {
4489#include "insn-data.def"
4490    default:
4491        return NULL;
4492    }
4493}
4494
4495#undef D
4496#undef C
4497
4498/* Extract a field from the insn.  The INSN should be left-aligned in
4499   the uint64_t so that we can more easily utilize the big-bit-endian
4500   definitions we extract from the Principles of Operation.  */
4501
4502static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4503{
4504    uint32_t r, m;
4505
4506    if (f->size == 0) {
4507        return;
4508    }
4509
4510    /* Zero extract the field from the insn.  */
4511    r = (insn << f->beg) >> (64 - f->size);
4512
4513    /* Sign-extend, or un-swap the field as necessary.  */
4514    switch (f->type) {
4515    case 0: /* unsigned */
4516        break;
4517    case 1: /* signed */
4518        assert(f->size <= 32);
4519        m = 1u << (f->size - 1);
4520        r = (r ^ m) - m;
4521        break;
4522    case 2: /* dl+dh split, signed 20 bit. */
4523        r = ((int8_t)r << 12) | (r >> 8);
4524        break;
4525    default:
4526        abort();
4527    }
4528
4529    /* Validate that the "compressed" encoding we selected above is valid.
4530       I.e. we haven't made two different original fields overlap.  */
4531    assert(((o->presentC >> f->indexC) & 1) == 0);
4532    o->presentC |= 1 << f->indexC;
4533    o->presentO |= 1 << f->indexO;
4534
4535    o->c[f->indexC] = r;
4536}
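/* Worked examples: a 4-bit register field starting at bit 8 of the
   left-aligned insn comes out as (insn << 8) >> 60.  For the split 20-bit
   displacement (type 2), the merged field delivers DL in the upper 12 bits
   of r and DH in the lower 8, and the fixup above reassembles them as the
   sign-extended value (DH << 12) | DL.  */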
4537
4538/* Lookup the insn at the current PC, extracting the operands into O and
4539   returning the info struct for the insn.  Returns NULL for invalid insn.  */
4540
4541static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
4542                                     DisasFields *f)
4543{
4544    uint64_t insn, pc = s->pc;
4545    int op, op2, ilen;
4546    const DisasInsn *info;
4547
4548    insn = ld_code2(env, pc);
4549    op = (insn >> 8) & 0xff;
4550    ilen = get_ilen(op);
4551    s->next_pc = s->pc + ilen;
4552
4553    switch (ilen) {
4554    case 2:
4555        insn = insn << 48;
4556        break;
4557    case 4:
4558        insn = ld_code4(env, pc) << 32;
4559        break;
4560    case 6:
4561        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
4562        break;
4563    default:
4564        abort();
4565    }
4566
4567    /* We can't actually determine the insn format until we've looked up
4568       the full insn opcode, which in turn requires locating the secondary
4569       opcode.  Assume by default that OP2 is at bit 40; for
4570       those smaller insns that don't actually have a secondary opcode
4571       this will correctly result in OP2 = 0. */
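    /* E.g. a 0xb2xx or 0xb9xx instruction keeps its second opcode byte in
       bits 8-15 of the left-aligned insn, the RI/RIL groups (0xa7, 0xc0,
       ...) keep a 4-bit op2 in bits 12-15, and the SS formats have no
       secondary opcode at all, so op2 is simply 0 for them.  */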
4572    switch (op) {
4573    case 0x01: /* E */
4574    case 0x80: /* S */
4575    case 0x82: /* S */
4576    case 0x93: /* S */
4577    case 0xb2: /* S, RRF, RRE */
4578    case 0xb3: /* RRE, RRD, RRF */
4579    case 0xb9: /* RRE, RRF */
4580    case 0xe5: /* SSE, SIL */
4581        op2 = (insn << 8) >> 56;
4582        break;
4583    case 0xa5: /* RI */
4584    case 0xa7: /* RI */
4585    case 0xc0: /* RIL */
4586    case 0xc2: /* RIL */
4587    case 0xc4: /* RIL */
4588    case 0xc6: /* RIL */
4589    case 0xc8: /* SSF */
4590    case 0xcc: /* RIL */
4591        op2 = (insn << 12) >> 60;
4592        break;
4593    case 0xd0 ... 0xdf: /* SS */
4594    case 0xe1: /* SS */
4595    case 0xe2: /* SS */
4596    case 0xe8: /* SS */
4597    case 0xe9: /* SS */
4598    case 0xea: /* SS */
4599    case 0xee ... 0xf3: /* SS */
4600    case 0xf8 ... 0xfd: /* SS */
4601        op2 = 0;
4602        break;
4603    default:
4604        op2 = (insn << 40) >> 56;
4605        break;
4606    }
4607
4608    memset(f, 0, sizeof(*f));
4609    f->op = op;
4610    f->op2 = op2;
4611
4612    /* Lookup the instruction.  */
4613    info = lookup_opc(op << 8 | op2);
4614
4615    /* If we found it, extract the operands.  */
4616    if (info != NULL) {
4617        DisasFormat fmt = info->fmt;
4618        int i;
4619
4620        for (i = 0; i < NUM_C_FIELD; ++i) {
4621            extract_field(f, &format_info[fmt].op[i], insn);
4622        }
4623    }
4624    return info;
4625}
4626
4627static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
4628{
4629    const DisasInsn *insn;
4630    ExitStatus ret = NO_EXIT;
4631    DisasFields f;
4632    DisasOps o;
4633
4634    /* Search for the insn in the table.  */
4635    insn = extract_insn(env, s, &f);
4636
4637    /* Not found means unimplemented/illegal opcode.  */
4638    if (insn == NULL) {
4639        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
4640                      f.op, f.op2);
4641        gen_illegal_opcode(s);
4642        return EXIT_NORETURN;
4643    }
4644
4645    /* Check for insn specification exceptions.  */
4646    if (insn->spec) {
4647        int spec = insn->spec, excp = 0, r;
4648
4649        if (spec & SPEC_r1_even) {
4650            r = get_field(&f, r1);
4651            if (r & 1) {
4652                excp = PGM_SPECIFICATION;
4653            }
4654        }
4655        if (spec & SPEC_r2_even) {
4656            r = get_field(&f, r2);
4657            if (r & 1) {
4658                excp = PGM_SPECIFICATION;
4659            }
4660        }
4661        if (spec & SPEC_r3_even) {
4662            r = get_field(&f, r3);
4663            if (r & 1) {
4664                excp = PGM_SPECIFICATION;
4665            }
4666        }
4667        if (spec & SPEC_r1_f128) {
4668            r = get_field(&f, r1);
4669            if (r > 13) {
4670                excp = PGM_SPECIFICATION;
4671            }
4672        }
4673        if (spec & SPEC_r2_f128) {
4674            r = get_field(&f, r2);
4675            if (r > 13) {
4676                excp = PGM_SPECIFICATION;
4677            }
4678        }
4679        if (excp) {
4680            gen_program_exception(s, excp);
4681            return EXIT_NORETURN;
4682        }
4683    }
4684
4685    /* Set up the structures we use to communicate with the helpers. */
4686    s->insn = insn;
4687    s->fields = &f;
4688    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
4689    TCGV_UNUSED_I64(o.out);
4690    TCGV_UNUSED_I64(o.out2);
4691    TCGV_UNUSED_I64(o.in1);
4692    TCGV_UNUSED_I64(o.in2);
4693    TCGV_UNUSED_I64(o.addr1);
4694
4695    /* Implement the instruction.  */
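    /* For a simple RR add, say, this walks roughly as follows: help_in1
       loads r1, help_in2 loads r2, help_prep allocates a fresh temporary
       for the result, help_op emits the TCG add, help_wout stores the low
       32 bits back into r1, and help_cout records CC_OP_ADD_32 so the
       condition code can be recomputed lazily.  */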
4696    if (insn->help_in1) {
4697        insn->help_in1(s, &f, &o);
4698    }
4699    if (insn->help_in2) {
4700        insn->help_in2(s, &f, &o);
4701    }
4702    if (insn->help_prep) {
4703        insn->help_prep(s, &f, &o);
4704    }
4705    if (insn->help_op) {
4706        ret = insn->help_op(s, &o);
4707    }
4708    if (insn->help_wout) {
4709        insn->help_wout(s, &f, &o);
4710    }
4711    if (insn->help_cout) {
4712        insn->help_cout(s, &o);
4713    }
4714
4715    /* Free any temporaries created by the helpers.  */
4716    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
4717        tcg_temp_free_i64(o.out);
4718    }
4719    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
4720        tcg_temp_free_i64(o.out2);
4721    }
4722    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
4723        tcg_temp_free_i64(o.in1);
4724    }
4725    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
4726        tcg_temp_free_i64(o.in2);
4727    }
4728    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
4729        tcg_temp_free_i64(o.addr1);
4730    }
4731
4732    /* Advance to the next instruction.  */
4733    s->pc = s->next_pc;
4734    return ret;
4735}
4736
4737static inline void gen_intermediate_code_internal(CPUS390XState *env,
4738                                                  TranslationBlock *tb,
4739                                                  int search_pc)
4740{
4741    DisasContext dc;
4742    target_ulong pc_start;
4743    uint64_t next_page_start;
4744    uint16_t *gen_opc_end;
4745    int j, lj = -1;
4746    int num_insns, max_insns;
4747    CPUBreakpoint *bp;
4748    ExitStatus status;
4749    bool do_debug;
4750
4751    pc_start = tb->pc;
4752
4753    /* 31-bit mode */
4754    if (!(tb->flags & FLAG_MASK_64)) {
4755        pc_start &= 0x7fffffff;
4756    }
4757
4758    dc.tb = tb;
4759    dc.pc = pc_start;
4760    dc.cc_op = CC_OP_DYNAMIC;
4761    do_debug = dc.singlestep_enabled = env->singlestep_enabled;
4762
4763    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4764
4765    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
4766
4767    num_insns = 0;
4768    max_insns = tb->cflags & CF_COUNT_MASK;
4769    if (max_insns == 0) {
4770        max_insns = CF_COUNT_MASK;
4771    }
4772
4773    gen_tb_start();
4774
4775    do {
4776        if (search_pc) {
4777            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4778            if (lj < j) {
4779                lj++;
4780                while (lj < j) {
4781                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
4782                }
4783            }
4784            tcg_ctx.gen_opc_pc[lj] = dc.pc;
4785            gen_opc_cc_op[lj] = dc.cc_op;
4786            tcg_ctx.gen_opc_instr_start[lj] = 1;
4787            tcg_ctx.gen_opc_icount[lj] = num_insns;
4788        }
4789        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
4790            gen_io_start();
4791        }
4792
4793        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4794            tcg_gen_debug_insn_start(dc.pc);
4795        }
4796
4797        status = NO_EXIT;
4798        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
4799            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
4800                if (bp->pc == dc.pc) {
4801                    status = EXIT_PC_STALE;
4802                    do_debug = true;
4803                    break;
4804                }
4805            }
4806        }
4807        if (status == NO_EXIT) {
4808            status = translate_one(env, &dc);
4809        }
4810
4811        /* If we reach a page boundary, are single stepping,
4812           or exhaust instruction count, stop generation.  */
4813        if (status == NO_EXIT
4814            && (dc.pc >= next_page_start
4815                || tcg_ctx.gen_opc_ptr >= gen_opc_end
4816                || num_insns >= max_insns
4817                || singlestep
4818                || env->singlestep_enabled)) {
4819            status = EXIT_PC_STALE;
4820        }
4821    } while (status == NO_EXIT);
4822
4823    if (tb->cflags & CF_LAST_IO) {
4824        gen_io_end();
4825    }
4826
4827    switch (status) {
4828    case EXIT_GOTO_TB:
4829    case EXIT_NORETURN:
4830        break;
4831    case EXIT_PC_STALE:
4832        update_psw_addr(&dc);
4833        /* FALLTHRU */
4834    case EXIT_PC_UPDATED:
4835        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
4836           cc op type is in env */
4837        update_cc_op(&dc);
4838        /* Exit the TB, either by raising a debug exception or by return.  */
4839        if (do_debug) {
4840            gen_exception(EXCP_DEBUG);
4841        } else {
4842            tcg_gen_exit_tb(0);
4843        }
4844        break;
4845    default:
4846        abort();
4847    }
4848
4849    gen_tb_end(tb, num_insns);
4850    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
4851    if (search_pc) {
4852        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4853        lj++;
4854        while (lj <= j) {
4855            tcg_ctx.gen_opc_instr_start[lj++] = 0;
4856        }
4857    } else {
4858        tb->size = dc.pc - pc_start;
4859        tb->icount = num_insns;
4860    }
4861
4862#if defined(S390X_DEBUG_DISAS)
4863    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
4864        qemu_log("IN: %s\n", lookup_symbol(pc_start));
4865        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
4866        qemu_log("\n");
4867    }
4868#endif
4869}
4870
4871void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4872{
4873    gen_intermediate_code_internal(env, tb, 0);
4874}
4875
4876void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4877{
4878    gen_intermediate_code_internal(env, tb, 1);
4879}
4880
4881void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4882{
4883    int cc_op;
4884    env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4885    cc_op = gen_opc_cc_op[pc_pos];
4886    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4887        env->cc_op = cc_op;
4888    }
4889}
4890