/* qemu/target/ppc/translate.c */
   1/*
   2 *  PowerPC emulation for qemu: main translation routines.
   3 *
   4 *  Copyright (c) 2003-2007 Jocelyn Mayer
   5 *  Copyright (C) 2011 Freescale Semiconductor, Inc.
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * Lesser General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21#include "qemu/osdep.h"
  22#include "cpu.h"
  23#include "internal.h"
  24#include "disas/disas.h"
  25#include "exec/exec-all.h"
  26#include "tcg-op.h"
  27#include "tcg-op-gvec.h"
  28#include "qemu/host-utils.h"
  29#include "qemu/main-loop.h"
  30#include "exec/cpu_ldst.h"
  31
  32#include "exec/helper-proto.h"
  33#include "exec/helper-gen.h"
  34
  35#include "trace-tcg.h"
  36#include "exec/translator.h"
  37#include "exec/log.h"
  38#include "qemu/atomic128.h"
  39
  40
/* Single-step mode flags, OR'ed together in DisasContext::singlestep_enabled
 * (CPU_SINGLE_STEP is tested in gen_prep_dbgex below). */
#define CPU_SINGLE_STEP 0x1
#define CPU_BRANCH_STEP 0x2
#define GDBSTUB_SINGLE_STEP 0x4
  44
  45/* Include definitions for instructions classes and implementations flags */
  46/* #define PPC_DEBUG_DISAS */
  47/* #define DO_PPC_STATISTICS */
  48
/* Disassembly-debug logging; compiled out unless PPC_DEBUG_DISAS is set. */
#ifdef PPC_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
  54/*****************************************************************************/
  55/* Code translation helpers                                                  */
  56
/* global register indexes */

/*
 * Backing buffer for the TCG global register names generated in
 * ppc_translate_init() ("r0".."r31", "r0H".."r31H", "crf0".."crf7");
 * the size expression matches exactly the bytes consumed there.
 */
static char cpu_reg_names[10 * 3 + 22 * 4   /* GPR */
                          + 10 * 4 + 22 * 5 /* SPE GPRh */
                          + 8 * 5           /* CRF */];
static TCGv cpu_gpr[32];        /* general-purpose registers */
static TCGv cpu_gprh[32];       /* SPE high halves of the GPRs */
static TCGv_i32 cpu_crf[8];     /* condition register fields */
static TCGv cpu_nip;            /* next instruction pointer */
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
static TCGv cpu_reserve;        /* lwarx/ldarx reservation address */
static TCGv cpu_reserve_val;    /* value loaded under the reservation */
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;
  76
  77#include "exec/gen-icount.h"
  78
/*
 * Register every CPUPPCState field used by generated code as a TCG
 * global.  Must run before any translation references the cpu_*
 * variables above.
 *
 * 'p' walks cpu_reg_names in fixed steps that equal the length of each
 * generated name including its NUL: "crfN" is 5 bytes, "rN"/"rNN" 3 or 4,
 * "rNH"/"rNNH" 4 or 5 — keep these in sync with the buffer size above.
 */
void ppc_translate_init(void)
{
    int i;
    char *p;
    size_t cpu_reg_names_size;

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    /* Condition register fields crf0..crf7 */
    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    /* GPRs and their SPE high halves */
    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;
    }

    cpu_nip = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    /* XER and its individually-tracked bits */
    cpu_xer = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ca), "CA");
    cpu_ov32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ov32), "OV32");
    cpu_ca32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ca32), "CA32");

    cpu_reserve = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");
    cpu_reserve_val = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_val),
                                     "reserve_val");

    cpu_fpscr = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, access_type),
                                             "access_type");
}
 153
/* internal defines */

/*
 * Per-translation-block decoder state, filled in when translation of a
 * TB starts and consulted/updated by the gen_* routines below.
 */
struct DisasContext {
    DisasContextBase base;      /* common translator core state */
    uint32_t opcode;            /* raw instruction word being translated */
    uint32_t exception;         /* exception flagged for the current insn,
                                   or POWERPC_EXCP_NONE */
    /* Privilege / memory-access mode flags */
    bool pr, hv, dr, le_mode;   /* pr/hv/dr gate the CHK_SV/CHK_HV/CHK_HVRM
                                   privilege checks below; le_mode feeds
                                   need_byteswap() */
    bool lazy_tlb_flush;        /* NOTE(review): presumably defers TLB
                                   flushes to TB exit — not visible here */
    bool need_access_type;      /* keep env->access_type updated? (see
                                   gen_set_access_type) */
    int mem_idx;
    int access_type;            /* last value written to cpu_access_type */
    /* Translation flags */
    MemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    bool sf_mode;               /* 64-bit mode; NARROW_MODE() is !sf_mode */
    bool has_cfar;
#endif
    bool fpu_enabled;
    bool altivec_enabled;
    bool vsx_enabled;
    bool spe_enabled;
    bool tm_enabled;
    bool gtse;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;     /* mask of CPU_SINGLE_STEP etc. flags */
    uint32_t flags;             /* POWERPC_FLAG_* (e.g. FLAG_DE checked in
                                   gen_prep_dbgex) */
    uint64_t insns_flags;       /* PPC_* insn-class bits (e.g. PPC_64B) */
    uint64_t insns_flags2;
};
 183
 184/* Return true iff byteswap is needed in a scalar memop */
 185static inline bool need_byteswap(const DisasContext *ctx)
 186{
 187#if defined(TARGET_WORDS_BIGENDIAN)
 188     return ctx->le_mode;
 189#else
 190     return !ctx->le_mode;
 191#endif
 192}
 193
/*
 * True when active word size < size of target_long, i.e. a 32-bit
 * context (sf_mode clear) on a 64-bit build; always false otherwise.
 */
#ifdef TARGET_PPC64
# define NARROW_MODE(C)  (!(C)->sf_mode)
#else
# define NARROW_MODE(C)  0
#endif
 200
/*
 * Decode-table leaf for one instruction encoding: which opcode bits are
 * invalid (selected by Rc), the instruction-class flags required, and
 * the translation callback.
 */
struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
    const char *oname;          /* instruction name for dumps/statistics */
#endif
#if defined(DO_PPC_STATISTICS)
    uint64_t count;             /* per-instruction hit counter */
#endif
};
 219
/* SPR load/store helpers */

/*
 * Copy SPR 'reg' from env into TCG value 't'.  SPRs are not TCG
 * globals, so they are accessed with explicit env loads/stores.
 */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}
 225
/* Store TCG value 't' into SPR 'reg' in env (counterpart of gen_load_spr). */
static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}
 230
 231static inline void gen_set_access_type(DisasContext *ctx, int access_type)
 232{
 233    if (ctx->need_access_type && ctx->access_type != access_type) {
 234        tcg_gen_movi_i32(cpu_access_type, access_type);
 235        ctx->access_type = access_type;
 236    }
 237}
 238
 239static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
 240{
 241    if (NARROW_MODE(ctx)) {
 242        nip = (uint32_t)nip;
 243    }
 244    tcg_gen_movi_tl(cpu_nip, nip);
 245}
 246
 247static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
 248{
 249    TCGv_i32 t0, t1;
 250
 251    /*
 252     * These are all synchronous exceptions, we set the PC back to the
 253     * faulting instruction
 254     */
 255    if (ctx->exception == POWERPC_EXCP_NONE) {
 256        gen_update_nip(ctx, ctx->base.pc_next - 4);
 257    }
 258    t0 = tcg_const_i32(excp);
 259    t1 = tcg_const_i32(error);
 260    gen_helper_raise_exception_err(cpu_env, t0, t1);
 261    tcg_temp_free_i32(t0);
 262    tcg_temp_free_i32(t1);
 263    ctx->exception = (excp);
 264}
 265
 266static void gen_exception(DisasContext *ctx, uint32_t excp)
 267{
 268    TCGv_i32 t0;
 269
 270    /*
 271     * These are all synchronous exceptions, we set the PC back to the
 272     * faulting instruction
 273     */
 274    if (ctx->exception == POWERPC_EXCP_NONE) {
 275        gen_update_nip(ctx, ctx->base.pc_next - 4);
 276    }
 277    t0 = tcg_const_i32(excp);
 278    gen_helper_raise_exception(cpu_env, t0);
 279    tcg_temp_free_i32(t0);
 280    ctx->exception = (excp);
 281}
 282
 283static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
 284                              target_ulong nip)
 285{
 286    TCGv_i32 t0;
 287
 288    gen_update_nip(ctx, nip);
 289    t0 = tcg_const_i32(excp);
 290    gen_helper_raise_exception(cpu_env, t0);
 291    tcg_temp_free_i32(t0);
 292    ctx->exception = (excp);
 293}
 294
 295/*
 296 * Tells the caller what is the appropriate exception to generate and prepares
 297 * SPR registers for this exception.
 298 *
 299 * The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or
 300 * POWERPC_EXCP_DEBUG (on BookE).
 301 */
 302static uint32_t gen_prep_dbgex(DisasContext *ctx)
 303{
 304    if (ctx->flags & POWERPC_FLAG_DE) {
 305        target_ulong dbsr = 0;
 306        if (ctx->singlestep_enabled & CPU_SINGLE_STEP) {
 307            dbsr = DBCR0_ICMP;
 308        } else {
 309            /* Must have been branch */
 310            dbsr = DBCR0_BRT;
 311        }
 312        TCGv t0 = tcg_temp_new();
 313        gen_load_spr(t0, SPR_BOOKE_DBSR);
 314        tcg_gen_ori_tl(t0, t0, dbsr);
 315        gen_store_spr(SPR_BOOKE_DBSR, t0);
 316        tcg_temp_free(t0);
 317        return POWERPC_EXCP_DEBUG;
 318    } else {
 319        return POWERPC_EXCP_TRACE;
 320    }
 321}
 322
 323static void gen_debug_exception(DisasContext *ctx)
 324{
 325    TCGv_i32 t0;
 326
 327    /*
 328     * These are all synchronous exceptions, we set the PC back to the
 329     * faulting instruction
 330     */
 331    if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
 332        (ctx->exception != POWERPC_EXCP_SYNC)) {
 333        gen_update_nip(ctx, ctx->base.pc_next);
 334    }
 335    t0 = tcg_const_i32(EXCP_DEBUG);
 336    gen_helper_raise_exception(cpu_env, t0);
 337    tcg_temp_free_i32(t0);
 338}
 339
/* Raise an invalid-instruction emulation error with extra code 'error'. */
static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
}
 345
/* Raise a privilege-violation program interrupt with extra code 'error'. */
static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
}
 350
/* Raise a hypervisor-privilege emulation error with extra code 'error'. */
static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
}
 356
/* Stop translation */
/* Sync the PC and flag POWERPC_EXCP_STOP so the TB ends here. */
static inline void gen_stop_exception(DisasContext *ctx)
{
    gen_update_nip(ctx, ctx->base.pc_next);
    ctx->exception = POWERPC_EXCP_STOP;
}
 363
#ifndef CONFIG_USER_ONLY
/* No need to update nip here, as execution flow will change */
/* Flag POWERPC_EXCP_SYNC so translation of this TB stops. */
static inline void gen_sync_exception(DisasContext *ctx)
{
    ctx->exception = POWERPC_EXCP_SYNC;
}
#endif
 371
/*
 * Wrappers around GEN_OPCODE*: declare an opcode-table entry whose
 * callback is gen_<name>.  The "2" forms supply an explicit table name,
 * the _E forms add the extended type2 class, and the _2 forms add a
 * fourth opcode level (opc4).
 */
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2)     \
GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2)

#define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \
GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2)
 389
/*
 * One entry of the instruction decode table: the (up to) four opcode
 * levels, the handler descriptor, and the instruction name.
 */
typedef struct opcode_t {
    unsigned char opc1, opc2, opc3, opc4;
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[4];
#endif
    opc_handler_t handler;
    const char *oname;          /* instruction name for table lookups/dumps */
} opcode_t;
 398
/* Helpers for priv. check */

/* Raise a privileged-opcode program interrupt and abandon the insn. */
#define GEN_PRIV                                                \
    do {                                                        \
        gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; \
    } while (0)

#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: every privileged insn simply traps. */
#define CHK_HV GEN_PRIV
#define CHK_SV GEN_PRIV
#define CHK_HVRM GEN_PRIV
#else
/* Require hypervisor state: not problem state and HV set. */
#define CHK_HV                                                          \
    do {                                                                \
        if (unlikely(ctx->pr || !ctx->hv)) {                            \
            GEN_PRIV;                                                   \
        }                                                               \
    } while (0)
/* Require supervisor (non-problem) state. */
#define CHK_SV                   \
    do {                         \
        if (unlikely(ctx->pr)) { \
            GEN_PRIV;            \
        }                        \
    } while (0)
/* Require hypervisor real mode: HV set, problem state and DR clear. */
#define CHK_HVRM                                            \
    do {                                                    \
        if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) {     \
            GEN_PRIV;                                       \
        }                                                   \
    } while (0)
#endif

/* No privilege check at all. */
#define CHK_NONE
 431
 432/*****************************************************************************/
 433/* PowerPC instructions table                                                */
 434
/*
 * Initializers for opcode_t table rows.  GEN_OPCODE{,_DUAL,2,3,4} differ
 * in whether both Rc-variant invalid masks are given, whether the table
 * name is explicit, and whether a fourth opcode level is used.  The
 * DO_PPC_STATISTICS build additionally stores the name inside the
 * handler descriptor so per-instruction counters can be reported.
 */
#if defined(DO_PPC_STATISTICS)
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = stringify(name),                                             \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = stringify(name),                                             \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = onam,                                                        \
    },                                                                        \
    .oname = onam,                                                            \
}
#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2)              \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = stringify(name),                                             \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2)        \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
        .oname = onam,                                                        \
    },                                                                        \
    .oname = onam,                                                            \
}
#else
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2)              \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2)        \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
#endif
 585
/* Invalid instruction */
/* Handler installed for every undecodable opcode (see invalid_handler). */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
 591
/*
 * Fallback table entry for unassigned opcodes: all bits are marked
 * invalid and the handler raises an invalid-instruction error.
 */
static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type    = PPC_NONE,
    .type2   = PPC_NONE,
    .handler = gen_invalid,
};
 599
 600/***                           Integer comparison                          ***/
 601
 602static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
 603{
 604    TCGv t0 = tcg_temp_new();
 605    TCGv t1 = tcg_temp_new();
 606    TCGv_i32 t = tcg_temp_new_i32();
 607
 608    tcg_gen_movi_tl(t0, CRF_EQ);
 609    tcg_gen_movi_tl(t1, CRF_LT);
 610    tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
 611                       t0, arg0, arg1, t1, t0);
 612    tcg_gen_movi_tl(t1, CRF_GT);
 613    tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU),
 614                       t0, arg0, arg1, t1, t0);
 615
 616    tcg_gen_trunc_tl_i32(t, t0);
 617    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
 618    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t);
 619
 620    tcg_temp_free(t0);
 621    tcg_temp_free(t1);
 622    tcg_temp_free_i32(t);
 623}
 624
/* Like gen_op_cmp() but with an immediate second operand. */
static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp(arg0, t0, s, crf);
    tcg_temp_free(t0);
}
 631
 632static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
 633{
 634    TCGv t0, t1;
 635    t0 = tcg_temp_new();
 636    t1 = tcg_temp_new();
 637    if (s) {
 638        tcg_gen_ext32s_tl(t0, arg0);
 639        tcg_gen_ext32s_tl(t1, arg1);
 640    } else {
 641        tcg_gen_ext32u_tl(t0, arg0);
 642        tcg_gen_ext32u_tl(t1, arg1);
 643    }
 644    gen_op_cmp(t0, t1, s, crf);
 645    tcg_temp_free(t1);
 646    tcg_temp_free(t0);
 647}
 648
/* Like gen_op_cmp32() but with an immediate second operand. */
static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
    tcg_temp_free(t0);
}
 655
 656static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
 657{
 658    if (NARROW_MODE(ctx)) {
 659        gen_op_cmpi32(reg, 0, 1, 0);
 660    } else {
 661        gen_op_cmpi(reg, 0, 1, 0);
 662    }
 663}
 664
 665/* cmp */
 666static void gen_cmp(DisasContext *ctx)
 667{
 668    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
 669        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
 670                   1, crfD(ctx->opcode));
 671    } else {
 672        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
 673                     1, crfD(ctx->opcode));
 674    }
 675}
 676
 677/* cmpi */
 678static void gen_cmpi(DisasContext *ctx)
 679{
 680    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
 681        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
 682                    1, crfD(ctx->opcode));
 683    } else {
 684        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
 685                      1, crfD(ctx->opcode));
 686    }
 687}
 688
 689/* cmpl */
 690static void gen_cmpl(DisasContext *ctx)
 691{
 692    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
 693        gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
 694                   0, crfD(ctx->opcode));
 695    } else {
 696        gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
 697                     0, crfD(ctx->opcode));
 698    }
 699}
 700
 701/* cmpli */
 702static void gen_cmpli(DisasContext *ctx)
 703{
 704    if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
 705        gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
 706                    0, crfD(ctx->opcode));
 707    } else {
 708        gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
 709                      0, crfD(ctx->opcode));
 710    }
 711}
 712
/* cmprb - range comparison: isupper, isalpha, islower */
/*
 * Test whether the low byte of rA lies inside one byte range packed in
 * rB (bytes 0-1), or — when the L bit 0x00200000 is set — inside either
 * of two ranges (bytes 0-1 and 2-3).  The boolean result is placed in
 * the GT bit of CR field crfD; the other CR bits end up clear.
 */
static void gen_cmprb(DisasContext *ctx)
{
    TCGv_i32 src1 = tcg_temp_new_i32();
    TCGv_i32 src2 = tcg_temp_new_i32();
    TCGv_i32 src2lo = tcg_temp_new_i32();
    TCGv_i32 src2hi = tcg_temp_new_i32();
    TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]);

    /* src1 = low byte of rA; first range bounds from bytes 0 and 1 of rB */
    tcg_gen_andi_i32(src1, src1, 0xFF);
    tcg_gen_ext8u_i32(src2lo, src2);
    tcg_gen_shri_i32(src2, src2, 8);
    tcg_gen_ext8u_i32(src2hi, src2);

    /* in-range test: src2lo <= src1 && src1 <= src2hi */
    tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
    tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
    tcg_gen_and_i32(crf, src2lo, src2hi);

    if (ctx->opcode & 0x00200000) {
        /* L bit set: also test the second range (bytes 2-3), OR results */
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2lo, src2);
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2hi, src2);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
        tcg_gen_and_i32(src2lo, src2lo, src2hi);
        tcg_gen_or_i32(crf, crf, src2lo);
    }
    /* position the 0/1 result in the GT bit of the CR field */
    tcg_gen_shli_i32(crf, crf, CRF_GT_BIT);
    tcg_temp_free_i32(src1);
    tcg_temp_free_i32(src2);
    tcg_temp_free_i32(src2lo);
    tcg_temp_free_i32(src2hi);
}
 750
#if defined(TARGET_PPC64)
/* cmpeqb - byte-equality compare of rA/rB via helper; result in crfD */
static void gen_cmpeqb(DisasContext *ctx)
{
    gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
}
#endif
 759
/* isel (PowerPC 2.03 specification) */
static void gen_isel(DisasContext *ctx)
{
    /* BI selects a single CR bit: field index bi>>2, bit mask within it. */
    uint32_t bi = rC(ctx->opcode);
    uint32_t mask = 0x08 >> (bi & 0x03);
    TCGv t0 = tcg_temp_new();
    TCGv zr;

    tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
    tcg_gen_andi_tl(t0, t0, mask);

    /* rD = CR[bi] ? rA : rB, with rA==r0 reading as constant zero. */
    zr = tcg_const_tl(0);
    tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
                       rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
                       cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(zr);
    tcg_temp_free(t0);
}
 778
/* cmpb: PowerPC 2.05 specification */
static void gen_cmpb(DisasContext *ctx)
{
    /* Byte-wise comparison delegated to the cmpb helper: rA = f(rS, rB). */
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}
 785
 786/***                           Integer arithmetic                          ***/
 787
/*
 * Update XER.OV (and OV32 on ISA 3.00) after arg0 = arg1 + arg2
 * (sub=0) or the corresponding subtraction (sub=1), and accumulate
 * the result into XER.SO.
 */
static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    /*
     * Signed overflow iff the result's sign differs from arg2's while
     * the operand signs permit it (the xor/and(c) trick below).
     */
    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    tcg_temp_free(t0);
    if (NARROW_MODE(ctx)) {
        /* 32-bit mode: the overflow indicator is bit 31. */
        tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, cpu_ov);
        }
    } else {
        if (is_isa300(ctx)) {
            /* OV32 tracks bit 31 even in 64-bit mode. */
            tcg_gen_extract_tl(cpu_ov32, cpu_ov, 31, 1);
        }
        tcg_gen_extract_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
 814
/*
 * Update XER.CA32 (carry out of the low 32 bits) for res = arg0 + arg1
 * (sub=0) or the corresponding subtraction (sub=1).  ISA 3.00 only;
 * a no-op on earlier ISAs.
 */
static inline void gen_op_arith_compute_ca32(DisasContext *ctx,
                                             TCGv res, TCGv arg0, TCGv arg1,
                                             TCGv ca32, int sub)
{
    TCGv t0;

    if (!is_isa300(ctx)) {
        return;
    }

    t0 = tcg_temp_new();
    /* For subtraction one operand is conceptually inverted: eqv == xor-not. */
    if (sub) {
        tcg_gen_eqv_tl(t0, arg0, arg1);
    } else {
        tcg_gen_xor_tl(t0, arg0, arg1);
    }
    /* Carry into bit 32 == bit 32 of (a ^ b ^ sum). */
    tcg_gen_xor_tl(t0, t0, res);
    tcg_gen_extract_tl(ca32, t0, 32, 1);
    tcg_temp_free(t0);
}
 835
/* Common add function */
/*
 * Generate ret = arg1 + arg2 [+ ca].
 *   add_ca:      add the current carry value (ca) as carry-in
 *   compute_ca:  write the carry-out back to ca (and ca32 on ISA 3.00)
 *   compute_ov:  update XER.OV/OV32 and SO
 *   compute_rc0: update CR0 (Rc=1 forms)
 * A temporary holds the result while flags are computed, so ret may
 * alias arg1 or arg2.
 */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, TCGv ca, TCGv ca32,
                                    bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);        /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, ca);
            }
            tcg_gen_xor_tl(ca, t0, t1);        /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_extract_tl(ca, ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(ca32, ca);
            }
        } else {
            TCGv zero = tcg_const_tl(0);
            /* Chain double-word adds so the carry-out lands in ca. */
            if (add_ca) {
                tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
                tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero);
            }
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0);
            tcg_temp_free(zero);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Add functions with two operands */
/* Emit rD = rA + rB via gen_op_arith_add; ca names the carry register. */
#define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov)     \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     ca, glue(ca, 32),                                        \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
/* Add functions with one operand and one immediate */
/* Emit rD = rA + const_val via gen_op_arith_add. */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca,                    \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], t0,                            \
                     ca, glue(ca, 32),                                        \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
    tcg_temp_free(t0);                                                        \
}
 918
/* add  add.  addo  addo. */
GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1)
/* addc  addc.  addco  addco. */
GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1)
/* adde  adde.  addeo  addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1)
/* addme  addme.  addmeo  addmeo.  */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1)
/* addex: like adde, but uses OV (rather than CA) as the carry-in/out */
GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0);
/* addze  addze.  addzeo  addzeo.*/
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1)
 936/* addi */
 937static void gen_addi(DisasContext *ctx)
 938{
 939    target_long simm = SIMM(ctx->opcode);
 940
 941    if (rA(ctx->opcode) == 0) {
 942        /* li case */
 943        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
 944    } else {
 945        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
 946                        cpu_gpr[rA(ctx->opcode)], simm);
 947    }
 948}
/* addic  addic.*/
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
    /* rD = rA + SIMM, always updating CA/CA32; CR0 per compute_rc0. */
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                     c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0);
    tcg_temp_free(c);
}

/* addic: no CR0 update */
static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}

/* addic.: record form, updates CR0 */
static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}
 967
 968/* addis */
 969static void gen_addis(DisasContext *ctx)
 970{
 971    target_long simm = SIMM(ctx->opcode);
 972
 973    if (rA(ctx->opcode) == 0) {
 974        /* lis case */
 975        tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
 976    } else {
 977        tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
 978                        cpu_gpr[rA(ctx->opcode)], simm << 16);
 979    }
 980}
 981
/* addpcis */
static void gen_addpcis(DisasContext *ctx)
{
    target_long d = DX(ctx->opcode);

    /* rD = address of the next instruction + (d << 16). */
    tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], ctx->base.pc_next + (d << 16));
}
 989
/*
 * 32-bit divide: ret = arg1 / arg2, signed when sign != 0.
 * t2 flags the operand combinations that would trap in TCG (division
 * by zero, and INT_MIN / -1 in the signed case); when flagged, the
 * divisor is replaced by t2 (== 1) so the emitted divide is safe, the
 * architectural result being undefined anyway.  compute_ov copies the
 * flag into XER.OV (and OV32 on ISA 3.00) and accumulates SO.
 */
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        /* t2 = (arg1 == INT_MIN && arg2 == -1) || arg2 == 0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    } else {
        /* t2 = (arg2 == 0) */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    }
    if (compute_ov) {
        tcg_gen_extu_i32_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_extu_i32_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t3);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}
/* Div functions */
/* Emit a 32-bit divide handler; see gen_op_arith_divw. */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     sign, compute_ov);                                       \
}
/* divwu  divwu.  divwuo  divwuo.   */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw  divw.  divwo  divwo.   */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);
1047
/* div[wd]eu[o][.] */
/*
 * Extended divide, implemented in a helper; compute_ov is passed to
 * the helper as an i32 flag.
 */
#define GEN_DIVE(name, hlpr, compute_ov)                                      \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 t0 = tcg_const_i32(compute_ov);                                  \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env,                      \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
    tcg_temp_free_i32(t0);                                                    \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                           \
    }                                                                         \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);
1065
1066#if defined(TARGET_PPC64)
/*
 * 64-bit divide: ret = arg1 / arg2, signed when sign != 0.  Same
 * safe-divisor scheme as gen_op_arith_divw: t2 flags division by zero
 * (and INT64_MIN / -1 when signed) and the divisor is replaced by
 * t2 (== 1) before the TCG divide.
 */
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    /* Operate on copies so the divisor fixup can't clobber arg2. */
    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        /* t2 = (arg1 == INT64_MIN && arg2 == -1) || arg2 == 0 */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i64(ret, t0, t1);
    } else {
        /* t2 = (arg2 == 0) */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i64(ret, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_mov_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}
1108
/* Emit a 64-bit divide handler; see gen_op_arith_divd. */
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu  divdu.  divduo  divduo.   */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd  divd.  divdo  divdo.   */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

/* 64-bit extended divides, helper-based; see GEN_DIVE above. */
GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
1127#endif
1128
/*
 * 32-bit remainder: ret = arg1 % arg2, signed when sign != 0.
 * Divisors that would trap in TCG (zero, or -1 together with INT_MIN
 * for the signed form) are replaced by 1 before the rem op; the
 * architectural result for those cases is undefined.  The signed
 * result is sign-extended into ret, the unsigned result zero-extended.
 */
static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        /* t2 = (arg1 == INT_MIN && arg2 == -1) || arg2 == 0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i32(t3, t0, t1);
        tcg_gen_ext_i32_tl(ret, t3);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    } else {
        /* A zero divisor is replaced with 1 to avoid the TCG trap. */
        TCGv_i32 t2 = tcg_const_i32(1);
        TCGv_i32 t3 = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}
1163
/* Emit a 32-bit modulo handler; see gen_op_arith_modw. */
#define GEN_INT_ARITH_MODW(name, opc3, sign)                                \
static void glue(gen_, name)(DisasContext *ctx)                             \
{                                                                           \
    gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)],                        \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],   \
                      sign);                                                \
}

GEN_INT_ARITH_MODW(moduw, 0x08, 0);
GEN_INT_ARITH_MODW(modsw, 0x18, 1);
1174
1175#if defined(TARGET_PPC64)
/*
 * 64-bit remainder: ret = arg1 % arg2, signed when sign != 0.  Same
 * safe-divisor substitution as gen_op_arith_modw (divisor forced to 1
 * when it would trap; result architecturally undefined in that case).
 */
static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* Operate on copies so the divisor fixup can't clobber arg2. */
    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        TCGv_i64 t2 = tcg_temp_new_i64();
        TCGv_i64 t3 = tcg_temp_new_i64();
        /* t2 = (arg1 == INT64_MIN && arg2 == -1) || arg2 == 0 */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i64(ret, t0, t1);
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t3);
    } else {
        TCGv_i64 t2 = tcg_const_i64(1);
        TCGv_i64 t3 = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i64(ret, t0, t1);
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t3);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
1208
1209#define GEN_INT_ARITH_MODD(name, opc3, sign)                            \
1210static void glue(gen_, name)(DisasContext *ctx)                           \
1211{                                                                         \
1212  gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)],                        \
1213                    cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],   \
1214                    sign);                                                \
1215}
1216
1217GEN_INT_ARITH_MODD(modud, 0x08, 0);
1218GEN_INT_ARITH_MODD(modsd, 0x18, 1);
1219#endif
1220
1221/* mulhw  mulhw. */
1222static void gen_mulhw(DisasContext *ctx)
1223{
1224    TCGv_i32 t0 = tcg_temp_new_i32();
1225    TCGv_i32 t1 = tcg_temp_new_i32();
1226
1227    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
1228    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
1229    tcg_gen_muls2_i32(t0, t1, t0, t1);
1230    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
1231    tcg_temp_free_i32(t0);
1232    tcg_temp_free_i32(t1);
1233    if (unlikely(Rc(ctx->opcode) != 0)) {
1234        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1235    }
1236}
1237
1238/* mulhwu  mulhwu.  */
1239static void gen_mulhwu(DisasContext *ctx)
1240{
1241    TCGv_i32 t0 = tcg_temp_new_i32();
1242    TCGv_i32 t1 = tcg_temp_new_i32();
1243
1244    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
1245    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
1246    tcg_gen_mulu2_i32(t0, t1, t0, t1);
1247    tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
1248    tcg_temp_free_i32(t0);
1249    tcg_temp_free_i32(t1);
1250    if (unlikely(Rc(ctx->opcode) != 0)) {
1251        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1252    }
1253}
1254
1255/* mullw  mullw. */
1256static void gen_mullw(DisasContext *ctx)
1257{
1258#if defined(TARGET_PPC64)
1259    TCGv_i64 t0, t1;
1260    t0 = tcg_temp_new_i64();
1261    t1 = tcg_temp_new_i64();
1262    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
1263    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
1264    tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
1265    tcg_temp_free(t0);
1266    tcg_temp_free(t1);
1267#else
1268    tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1269                    cpu_gpr[rB(ctx->opcode)]);
1270#endif
1271    if (unlikely(Rc(ctx->opcode) != 0)) {
1272        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1273    }
1274}
1275
/* mullwo  mullwo. */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    /* Signed 32x32 -> 64 multiply: t0 = low half, t1 = high half. */
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    /* OV iff the high half is not the sign-extension of the low half. */
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1305
/* mulli */
static void gen_mulli(DisasContext *ctx)
{
    /* rD = rA * SIMM; no flags and no record form. */
    tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    SIMM(ctx->opcode));
}
1312
1313#if defined(TARGET_PPC64)
1314/* mulhd  mulhd. */
1315static void gen_mulhd(DisasContext *ctx)
1316{
1317    TCGv lo = tcg_temp_new();
1318    tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
1319                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
1320    tcg_temp_free(lo);
1321    if (unlikely(Rc(ctx->opcode) != 0)) {
1322        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1323    }
1324}
1325
1326/* mulhdu  mulhdu. */
1327static void gen_mulhdu(DisasContext *ctx)
1328{
1329    TCGv lo = tcg_temp_new();
1330    tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
1331                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
1332    tcg_temp_free(lo);
1333    if (unlikely(Rc(ctx->opcode) != 0)) {
1334        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1335    }
1336}
1337
/* mulld  mulld. */
static void gen_mulld(DisasContext *ctx)
{
    /* rD = low 64 bits of rA * rB. */
    tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                   cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1347
/* mulldo  mulldo. */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* Full 128-bit signed product: t0 = low half, t1 = high half. */
    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    /* OV iff the high half is not the sign-extension of the low half. */
    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1372#endif
1373
/* Common subf function */
/*
 * Generate ret = arg2 - arg1 (i.e. ~arg1 + arg2 + 1), optionally with
 * carry-in from CA (add_ca), carry-out into CA/CA32 (compute_ca),
 * overflow into OV/OV32/SO (compute_ov) and CR0 update (compute_rc0).
 * A temporary holds the result while flags are computed, so ret may
 * alias arg1 or arg2.
 */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca].  */
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);         /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_temp_free(inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changes w/ carry */
            tcg_temp_free(t1);
            tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(cpu_ca32, cpu_ca);
            }
        } else if (add_ca) {
            /* ~arg1 + arg2 + ca, collecting the carry-out with add2. */
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_const_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
            tcg_temp_free(zero);
            tcg_temp_free(inv1);
        } else {
            /* Plain subtract: carry-out is arg2 >= arg1 (unsigned). */
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1);
        }
    } else if (add_ca) {
        /*
         * Since we're ignoring carry-out, we can simplify the
         * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.
         */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Sub functions with two operands */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* Sub functions with one operand and one immediate */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], t0,                           \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
    tcg_temp_free(t0);                                                        \
}
/* subf  subf.  subfo  subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc  subfc.  subfco  subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe  subfe.  subfeo  subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme  subfme.  subfmeo  subfmeo.  */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze  subfze.  subfzeo  subfzeo.*/
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
1482
/* subfic */
static void gen_subfic(DisasContext *ctx)
{
    /* rD = SIMM - rA, computing CA but neither OV nor CR0. */
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      c, 0, 1, 0, 0);
    tcg_temp_free(c);
}
1491
/* neg neg. nego nego. */
static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
{
    /* rD = 0 - rA via the common subf path so OV can be computed. */
    TCGv zero = tcg_const_tl(0);
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      zero, 0, 0, compute_ov, Rc(ctx->opcode));
    tcg_temp_free(zero);
}
1500
1501static void gen_neg(DisasContext *ctx)
1502{
1503    tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
1504    if (unlikely(Rc(ctx->opcode))) {
1505        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1506    }
1507}
1508
static void gen_nego(DisasContext *ctx)
{
    /* nego/nego.: negate with OV update. */
    gen_op_arith_neg(ctx, 1);
}
1513
1514/***                            Integer logical                            ***/
1515#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
1516static void glue(gen_, name)(DisasContext *ctx)                               \
1517{                                                                             \
1518    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
1519       cpu_gpr[rB(ctx->opcode)]);                                             \
1520    if (unlikely(Rc(ctx->opcode) != 0))                                       \
1521        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
1522}
1523
1524#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
1525static void glue(gen_, name)(DisasContext *ctx)                               \
1526{                                                                             \
1527    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
1528    if (unlikely(Rc(ctx->opcode) != 0))                                       \
1529        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
1530}
1531
1532/* and & and. */
1533GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
1534/* andc & andc. */
1535GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
1536
/* andi. */
static void gen_andi_(DisasContext *ctx)
{
    /* rA = rS & UIMM; CR0 is always updated (no non-record form). */
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode));
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}

/* andis. */
static void gen_andis_(DisasContext *ctx)
{
    /* rA = rS & (UIMM << 16); CR0 is always updated. */
    tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    UIMM(ctx->opcode) << 16);
    gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
1552
/* cntlzw */
static void gen_cntlzw(DisasContext *ctx)
{
    TCGv_i32 t = tcg_temp_new_i32();

    /*
     * Count leading zeros in 32-bit space; clzi's third argument (32)
     * is the result produced when the truncated source is zero.
     */
    tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_clzi_i32(t, t, 32);
    tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
    tcg_temp_free_i32(t);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}

/* cnttzw */
static void gen_cnttzw(DisasContext *ctx)
{
    TCGv_i32 t = tcg_temp_new_i32();

    /* Trailing-zero count; same structure as gen_cntlzw above. */
    tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_ctzi_i32(t, t, 32);
    tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
    tcg_temp_free_i32(t);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
1582
/* Remaining logicals expanded via the GEN_LOGICAL1/GEN_LOGICAL2 macros */
/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
1593
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/*
 * Store 0 to CPUState.halted and end the TB with an EXCP_HLT exit.
 * cpu_env points at PowerPCCPU.env, so the negative offsetof walks
 * back to the containing PowerPCCPU and then into its CPUState.
 */
static void gen_pause(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_const_i32(0);
    tcg_gen_st_i32(t0, cpu_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
    tcg_temp_free_i32(t0);

    /* Stop translation, this gives other CPUs a chance to run */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
}
#endif /* defined(TARGET_PPC64) */
1606
/* or & or. */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for the mr / mr. (OR with rS == rB) case */
    if (rs != ra || rs != rb) {
        if (rs != rb) {
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        } else {
            /* rS == rB: plain register move */
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        }
        if (unlikely(Rc(ctx->opcode) != 0)) {
            gen_set_Rc0(ctx, cpu_gpr[ra]);
        }
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        /* or. rx,rx,rx: no data movement, only a CR0 update */
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else if (rs != 0) { /* 0 is nop */
        /*
         * or rx,rx,rx with rx != 0 encodes a thread-priority hint;
         * map the register number to a PPR priority value.
         */
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            break;
        }
        if (prio) {
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            /* The priority lives in PPR bits 52:50 (LSB-0 numbering) */
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
            tcg_temp_free(t0);
        }
#if !defined(CONFIG_USER_ONLY)
        /*
         * Pause out of TCG otherwise spin loops with smt_low eat too
         * much CPU and the kernel hangs.  This applies to all
         * encodings other than no-op, e.g., miso(rs=26), yield(27),
         * mdoio(29), mdoom(30), and all currently undefined.
         */
        gen_pause(ctx);
#endif
#endif
    }
}
/* orc & orc. */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
1695
1696/* xor & xor. */
1697static void gen_xor(DisasContext *ctx)
1698{
1699    /* Optimisation for "set to zero" case */
1700    if (rS(ctx->opcode) != rB(ctx->opcode)) {
1701        tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
1702                       cpu_gpr[rB(ctx->opcode)]);
1703    } else {
1704        tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
1705    }
1706    if (unlikely(Rc(ctx->opcode) != 0)) {
1707        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1708    }
1709}
1710
1711/* ori */
1712static void gen_ori(DisasContext *ctx)
1713{
1714    target_ulong uimm = UIMM(ctx->opcode);
1715
1716    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
1717        return;
1718    }
1719    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
1720}
1721
1722/* oris */
1723static void gen_oris(DisasContext *ctx)
1724{
1725    target_ulong uimm = UIMM(ctx->opcode);
1726
1727    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
1728        /* NOP */
1729        return;
1730    }
1731    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
1732                   uimm << 16);
1733}
1734
1735/* xori */
1736static void gen_xori(DisasContext *ctx)
1737{
1738    target_ulong uimm = UIMM(ctx->opcode);
1739
1740    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
1741        /* NOP */
1742        return;
1743    }
1744    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
1745}
1746
1747/* xoris */
1748static void gen_xoris(DisasContext *ctx)
1749{
1750    target_ulong uimm = UIMM(ctx->opcode);
1751
1752    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
1753        /* NOP */
1754        return;
1755    }
1756    tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
1757                    uimm << 16);
1758}
1759
/* popcntb : PowerPC 2.03 specification */
static void gen_popcntb(DisasContext *ctx)
{
    /* Per-byte population count; the bit twiddling is in the helper */
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}

/* popcntw: per-32-bit-word population count */
static void gen_popcntw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    /* Two words per register: done in a helper */
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#else
    /* One word per register: a single popcount op suffices */
    tcg_gen_ctpop_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#endif
}

#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
static void gen_popcntd(DisasContext *ctx)
{
    tcg_gen_ctpop_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif
1782
/* prtyw: PowerPC 2.05 specification */
static void gen_prtyw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    /*
     * XOR-fold halfwords then bytes so that bit 0 of each 32-bit word
     * holds the XOR of that word's byte-LSBs, then keep only those bits.
     */
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
    tcg_temp_free(t0);
}

#if defined(TARGET_PPC64)
/* prtyd: PowerPC 2.05 specification */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    /* Fold all eight byte-LSBs of the doubleword down to bit 0 */
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
    tcg_temp_free(t0);
}
#endif

#if defined(TARGET_PPC64)
/* bpermd */
static void gen_bpermd(DisasContext *ctx)
{
    /* Bit-permute doubleword; the gather loop lives in the helper */
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
#endif
1823
#if defined(TARGET_PPC64)
/* extsw & extsw. : sign-extend the low 32 bits of rS into rA */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
1827
1828/* cntlzd */
1829static void gen_cntlzd(DisasContext *ctx)
1830{
1831    tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
1832    if (unlikely(Rc(ctx->opcode) != 0)) {
1833        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1834    }
1835}
1836
1837/* cnttzd */
1838static void gen_cnttzd(DisasContext *ctx)
1839{
1840    tcg_gen_ctzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
1841    if (unlikely(Rc(ctx->opcode) != 0)) {
1842        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
1843    }
1844}
1845
/* darn */
static void gen_darn(DisasContext *ctx)
{
    int l = L(ctx->opcode);

    if (l > 2) {
        /* Reserved L values: result is all-ones */
        tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
    } else {
        /* Randomness is an I/O-like side effect under icount */
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        if (l == 0) {
            gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
        } else {
            /* Return 64-bit random for both CRN and RRN */
            gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
        }
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            /* End the TB after the I/O-flagged operation */
            gen_stop_exception(ctx);
        }
    }
}
1868#endif
1869
1870/***                             Integer rotate                            ***/
1871
/* rlwimi & rlwimi. */
static void gen_rlwimi(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    /* Contiguous mask whose low edge matches the rotation: plain deposit */
    if (sh == (31 - me) && mb <= me) {
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask;
        TCGv t1;

#if defined(TARGET_PPC64)
        /* MASK() takes 64-bit positions; the window is in the low word */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

        t1 = tcg_temp_new();
        if (mask <= 0xffffffffu) {
            /* Mask fits in 32 bits: rotate in 32-bit arithmetic */
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_extu_i32_tl(t1, t0);
            tcg_temp_free_i32(t0);
        } else {
#if defined(TARGET_PPC64)
            /* Replicate the low word so a 64-bit rotate matches rlwimi */
            tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t1, t1, sh);
#else
            /* On 32-bit targets every mask fits in 32 bits */
            g_assert_not_reached();
#endif
        }

        /* Merge: keep rA outside the mask, insert rotated rS inside it */
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
1918
/* rlwinm & rlwinm. */
static void gen_rlwinm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int sh = SH(ctx->opcode);
    int mb = MB(ctx->opcode);
    int me = ME(ctx->opcode);
    int len = me - mb + 1;
    int rsh = (32 - sh) & 31;

    if (sh != 0 && len > 0 && me == (31 - sh)) {
        /* Mask ends at the rotation point: shift-into-zeros deposit */
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 31 && rsh + len <= 32) {
        /* Mask reaches bit 0: plain field extract at the right shift */
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        target_ulong mask;
#if defined(TARGET_PPC64)
        /* MASK() takes 64-bit positions; the window is in the low word */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
        if (sh == 0) {
            tcg_gen_andi_tl(t_ra, t_rs, mask);
        } else if (mask <= 0xffffffffu) {
            /* 32-bit mask: rotate and mask in 32-bit arithmetic */
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_andi_i32(t0, t0, mask);
            tcg_gen_extu_i32_tl(t_ra, t0);
            tcg_temp_free_i32(t0);
        } else {
#if defined(TARGET_PPC64)
            /* Replicate the low word so a 64-bit rotate mimics rlwinm */
            tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t_ra, t_ra, sh);
            tcg_gen_andi_i64(t_ra, t_ra, mask);
#else
            /* On 32-bit targets every mask fits in 32 bits */
            g_assert_not_reached();
#endif
        }
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
1964
/* rlwnm & rlwnm. */
static void gen_rlwnm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);
    target_ulong mask;

#if defined(TARGET_PPC64)
    /* MASK() takes 64-bit positions; the window is in the low word */
    mb += 32;
    me += 32;
#endif
    mask = MASK(mb, me);

    if (mask <= 0xffffffffu) {
        /* Rotate by rB mod 32 entirely in 32-bit arithmetic */
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, t_rb);
        tcg_gen_trunc_tl_i32(t1, t_rs);
        tcg_gen_andi_i32(t0, t0, 0x1f);
        tcg_gen_rotl_i32(t1, t1, t0);
        tcg_gen_extu_i32_tl(t_ra, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    } else {
#if defined(TARGET_PPC64)
        /* Replicate the low word so a 64-bit rotate mimics rlwnm */
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_andi_i64(t0, t_rb, 0x1f);
        tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
        tcg_gen_rotl_i64(t_ra, t_ra, t0);
        tcg_temp_free_i64(t0);
#else
        /* On 32-bit targets every mask fits in 32 bits */
        g_assert_not_reached();
#endif
    }

    tcg_gen_andi_tl(t_ra, t_ra, mask);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2009
2010#if defined(TARGET_PPC64)
/*
 * The 64-bit rotate insns split their sh/mb/me fields, so each decode
 * entry needs a distinct trampoline that supplies the extra field
 * bit(s) as compile-time constants.  R2 expands two variants (one
 * extra bit), R4 expands four (two extra bits).
 */
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}
2041
/*
 * Common codegen for the rotate-doubleword-immediate-and-mask family:
 * rA = rot64(rS, sh) & MASK(mb, me), optionally recording into CR0.
 */
static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int len = me - mb + 1;
    int rsh = (64 - sh) & 63;

    if (sh != 0 && len > 0 && me == (63 - sh)) {
        /* Mask ends at the rotation point: shift-into-zeros deposit */
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 63 && rsh + len <= 64) {
        /* Mask reaches bit 0: extract len bits starting at rsh */
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        tcg_gen_rotli_tl(t_ra, t_rs, sh);
        tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2061
2062/* rldicl - rldicl. */
2063static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
2064{
2065    uint32_t sh, mb;
2066
2067    sh = SH(ctx->opcode) | (shn << 5);
2068    mb = MB(ctx->opcode) | (mbn << 5);
2069    gen_rldinm(ctx, mb, 63, sh);
2070}
2071GEN_PPC64_R4(rldicl, 0x1E, 0x00);
2072
2073/* rldicr - rldicr. */
2074static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
2075{
2076    uint32_t sh, me;
2077
2078    sh = SH(ctx->opcode) | (shn << 5);
2079    me = MB(ctx->opcode) | (men << 5);
2080    gen_rldinm(ctx, 0, me, sh);
2081}
2082GEN_PPC64_R4(rldicr, 0x1E, 0x02);
2083
2084/* rldic - rldic. */
2085static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
2086{
2087    uint32_t sh, mb;
2088
2089    sh = SH(ctx->opcode) | (shn << 5);
2090    mb = MB(ctx->opcode) | (mbn << 5);
2091    gen_rldinm(ctx, mb, 63 - sh, sh);
2092}
2093GEN_PPC64_R4(rldic, 0x1E, 0x04);
2094
/*
 * Common codegen for rldcl/rldcr: rotate rS left by the low 6 bits
 * of rB, then apply MASK(mb, me), optionally recording into CR0.
 */
static void gen_rldnm(DisasContext *ctx, int mb, int me)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    TCGv t0;

    t0 = tcg_temp_new();
    /* Only the low 6 bits of rB form the rotate count */
    tcg_gen_andi_tl(t0, t_rb, 0x3f);
    tcg_gen_rotl_tl(t_ra, t_rs, t0);
    tcg_temp_free(t0);

    tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2112
2113/* rldcl - rldcl. */
2114static inline void gen_rldcl(DisasContext *ctx, int mbn)
2115{
2116    uint32_t mb;
2117
2118    mb = MB(ctx->opcode) | (mbn << 5);
2119    gen_rldnm(ctx, mb, 63);
2120}
2121GEN_PPC64_R2(rldcl, 0x1E, 0x08);
2122
2123/* rldcr - rldcr. */
2124static inline void gen_rldcr(DisasContext *ctx, int men)
2125{
2126    uint32_t me;
2127
2128    me = MB(ctx->opcode) | (men << 5);
2129    gen_rldnm(ctx, 0, me);
2130}
2131GEN_PPC64_R2(rldcr, 0x1E, 0x09);
2132
/* rldimi - rldimi. */
static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode) | (shn << 5);
    uint32_t mb = MB(ctx->opcode) | (mbn << 5);
    uint32_t me = 63 - sh;

    if (mb <= me) {
        /* Contiguous insert: a single deposit suffices */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        /* Wrap-around mask: rotate, then merge under the mask */
        target_ulong mask = MASK(mb, me);
        TCGv t1 = tcg_temp_new();

        tcg_gen_rotli_tl(t1, t_rs, sh);
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
2159#endif
2160
2161/***                             Integer shift                             ***/
2162
/* slw & slw. */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Move rB's bit 5 into the sign bit, then broadcast it via sari */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    /* Shift the (possibly zeroed) source left by rB mod 32 */
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    /* slw produces a 32-bit result: zero-extend into rA */
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2188
2189/* sraw & sraw. */
2190static void gen_sraw(DisasContext *ctx)
2191{
2192    gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
2193                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2194    if (unlikely(Rc(ctx->opcode) != 0)) {
2195        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2196    }
2197}
2198
/* srawi & srawi. */
static void gen_srawi(DisasContext *ctx)
{
    int sh = SH(ctx->opcode);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift by zero: just sign-extend the word; CA (and CA32) clear */
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        tcg_gen_ext32s_tl(dst, src);
        /* CA = (source < 0) && (any of the sh shifted-out bits set) */
        tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        /* Normalize CA to 0/1 */
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, dst, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
2229
/* srw & srw. */
static void gen_srw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Move rB's bit 5 into the sign bit, then broadcast it via sari */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    /* Logical word shift: clear the high part before shifting right */
    tcg_gen_ext32u_tl(t0, t0);
    t1 = tcg_temp_new();
    /* Shift the masked source right by rB mod 32 */
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2255
2256#if defined(TARGET_PPC64)
/* sld & sld. */
static void gen_sld(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    /* Move rB's bit 6 into the sign bit, then broadcast it via sari */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    /* Shift the masked source left by rB mod 64 */
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2276
2277/* srad & srad. */
2278static void gen_srad(DisasContext *ctx)
2279{
2280    gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
2281                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2282    if (unlikely(Rc(ctx->opcode) != 0)) {
2283        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2284    }
2285}
/* sradi & sradi. */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    /* 'n' supplies the high (6th) bit of the split shift amount */
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift by zero: copy the source; CA (and CA32) clear */
        tcg_gen_mov_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        /* CA = (source < 0) && (any of the sh shifted-out bits set) */
        tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        /* Normalize CA to 0/1 */
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, src, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
2315
/* sradi with the shift-amount high bit clear */
static void gen_sradi0(DisasContext *ctx)
{
    gen_sradi(ctx, 0);
}

/* sradi with the shift-amount high bit set */
static void gen_sradi1(DisasContext *ctx)
{
    gen_sradi(ctx, 1);
}
2325
2326/* extswsli & extswsli. */
2327static inline void gen_extswsli(DisasContext *ctx, int n)
2328{
2329    int sh = SH(ctx->opcode) + (n << 5);
2330    TCGv dst = cpu_gpr[rA(ctx->opcode)];
2331    TCGv src = cpu_gpr[rS(ctx->opcode)];
2332
2333    tcg_gen_ext32s_tl(dst, src);
2334    tcg_gen_shli_tl(dst, dst, sh);
2335    if (unlikely(Rc(ctx->opcode) != 0)) {
2336        gen_set_Rc0(ctx, dst);
2337    }
2338}
2339
/* extswsli with the shift-amount high bit clear */
static void gen_extswsli0(DisasContext *ctx)
{
    gen_extswsli(ctx, 0);
}

/* extswsli with the shift-amount high bit set */
static void gen_extswsli1(DisasContext *ctx)
{
    gen_extswsli(ctx, 1);
}
2349
/* srd & srd. */
static void gen_srd(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    /* Move rB's bit 6 into the sign bit, then broadcast it via sari */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    /* Shift the masked source right by rB mod 64 */
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2369#endif
2370
2371/***                           Addressing modes                            ***/
2372/* Register indirect with immediate index : EA = (rA|0) + SIMM */
2373static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
2374                                      target_long maskl)
2375{
2376    target_long simm = SIMM(ctx->opcode);
2377
2378    simm &= ~maskl;
2379    if (rA(ctx->opcode) == 0) {
2380        if (NARROW_MODE(ctx)) {
2381            simm = (uint32_t)simm;
2382        }
2383        tcg_gen_movi_tl(EA, simm);
2384    } else if (likely(simm != 0)) {
2385        tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
2386        if (NARROW_MODE(ctx)) {
2387            tcg_gen_ext32u_tl(EA, EA);
2388        }
2389    } else {
2390        if (NARROW_MODE(ctx)) {
2391            tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2392        } else {
2393            tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2394        }
2395    }
2396}
2397
2398static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
2399{
2400    if (rA(ctx->opcode) == 0) {
2401        if (NARROW_MODE(ctx)) {
2402            tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2403        } else {
2404            tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
2405        }
2406    } else {
2407        tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2408        if (NARROW_MODE(ctx)) {
2409            tcg_gen_ext32u_tl(EA, EA);
2410        }
2411    }
2412}
2413
2414static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
2415{
2416    if (rA(ctx->opcode) == 0) {
2417        tcg_gen_movi_tl(EA, 0);
2418    } else if (NARROW_MODE(ctx)) {
2419        tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2420    } else {
2421        tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
2422    }
2423}
2424
/* ret = arg1 + val, truncated to 32 bits in narrow (32-bit) mode */
static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                target_long val)
{
    tcg_gen_addi_tl(ret, arg1, val);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(ret, ret);
    }
}
2433
/* Raise an alignment interrupt flagged as a little-endian access error */
static inline void gen_align_no_le(DisasContext *ctx)
{
    gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
                      (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
}
2439
2440/***                             Integer load                              ***/
2441#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
2442#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
2443
2444#define GEN_QEMU_LOAD_TL(ldop, op)                                      \
2445static void glue(gen_qemu_, ldop)(DisasContext *ctx,                    \
2446                                  TCGv val,                             \
2447                                  TCGv addr)                            \
2448{                                                                       \
2449    tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op);                    \
2450}
2451
2452GEN_QEMU_LOAD_TL(ld8u,  DEF_MEMOP(MO_UB))
2453GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
2454GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
2455GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
2456GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))
2457
2458GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
2459GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))
2460
2461#define GEN_QEMU_LOAD_64(ldop, op)                                  \
2462static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx,    \
2463                                             TCGv_i64 val,          \
2464                                             TCGv addr)             \
2465{                                                                   \
2466    tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op);               \
2467}
2468
2469GEN_QEMU_LOAD_64(ld8u,  DEF_MEMOP(MO_UB))
2470GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
2471GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
2472GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
2473GEN_QEMU_LOAD_64(ld64,  DEF_MEMOP(MO_Q))
2474
2475#if defined(TARGET_PPC64)
2476GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_Q))
2477#endif
2478
2479#define GEN_QEMU_STORE_TL(stop, op)                                     \
2480static void glue(gen_qemu_, stop)(DisasContext *ctx,                    \
2481                                  TCGv val,                             \
2482                                  TCGv addr)                            \
2483{                                                                       \
2484    tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op);                    \
2485}
2486
2487GEN_QEMU_STORE_TL(st8,  DEF_MEMOP(MO_UB))
2488GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
2489GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))
2490
2491GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
2492GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))
2493
2494#define GEN_QEMU_STORE_64(stop, op)                               \
2495static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx,  \
2496                                              TCGv_i64 val,       \
2497                                              TCGv addr)          \
2498{                                                                 \
2499    tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op);             \
2500}
2501
2502GEN_QEMU_STORE_64(st8,  DEF_MEMOP(MO_UB))
2503GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
2504GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
2505GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q))
2506
2507#if defined(TARGET_PPC64)
2508GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q))
2509#endif
2510
/*
 * Expand gen_<name>(): D-form integer load into rD with
 * EA = (rA|0) + SIMM.
 */
#define GEN_LD(name, ldop, opc, type)                                         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
2521
/*
 * GEN_LDU: D-form load with update.  Loads into rD, then writes the
 * effective address back into rA.  The forms with rA == 0 or
 * rA == rD are invalid per the ISA and raise a program exception.
 * PPC_64B (DS-form) encodings mask the two low displacement bits.
 *
 * Fix: braced the if/else arms — QEMU coding style requires braces
 * on all conditional bodies; no behavior change.
 */
#define GEN_LDU(name, ldop, opc, type)                                        \
static void glue(gen_, name##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0 ||                                      \
                 rA(ctx->opcode) == rD(ctx->opcode))) {                       \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    if (type == PPC_64B) {                                                    \
        gen_addr_imm_index(ctx, EA, 0x03);                                    \
    } else {                                                                  \
        gen_addr_imm_index(ctx, EA, 0);                                       \
    }                                                                         \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}
2541
/*
 * GEN_LDUX: X-form indexed load with update.  EA = rA + rB; loads
 * into rD and writes EA back into rA.  rA == 0 or rA == rD is an
 * invalid form.
 */
#define GEN_LDUX(name, ldop, opc2, opc3, type)                                \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0 ||                                      \
                 rA(ctx->opcode) == rD(ctx->opcode))) {                       \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}
2558
/*
 * GEN_LDX_E: X-form indexed load.  EA = (rA|0) + rB; 'chk' is a
 * privilege-check statement run first (CHK_NONE, CHK_HVRM, ...).
 */
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk;                                                                      \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

/* Unchecked indexed load. */
#define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE)

/* Indexed load restricted to hypervisor real mode. */
#define GEN_LDX_HVRM(name, ldop, opc2, opc3, type)                            \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM)
2576
/*
 * GEN_LDS: instantiate the full family of one load size:
 * D-form, D-form with update, indexed with update, and indexed.
 */
#define GEN_LDS(name, ldop, op, type)                                         \
GEN_LD(name, ldop, op | 0x20, type);                                          \
GEN_LDU(name, ldop, op | 0x21, type);                                         \
GEN_LDUX(name, ldop, 0x17, op | 0x01, type);                                  \
GEN_LDX(name, ldop, 0x17, op | 0x00, type)

/* lbz lbzu lbzux lbzx */
GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
/* lha lhau lhaux lhax */
GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
/* lhz lhzu lhzux lhzx */
GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
/* lwz lwzu lwzux lwzx */
GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
2591
/*
 * GEN_LDEPX: load by external process ID.  Privileged (CHK_SV) and
 * performed through the dedicated PPC_TLB_EPID_LOAD MMU index rather
 * than ctx->mem_idx.
 */
#define GEN_LDEPX(name, ldop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV;                                                                   \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\
    tcg_temp_free(EA);                                                        \
}

GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
GEN_LDEPX(ld, DEF_MEMOP(MO_Q), 0x1D, 0x00)
#endif
2610
2611#if defined(TARGET_PPC64)
/* lwaux */
GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
/* lwax */
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
/* ldux */
GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B);
/* ldx */
GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B);

/* CI (cache-inhibited) load variants, hypervisor-real-mode only. */
GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
2626
2627static void gen_ld(DisasContext *ctx)
2628{
2629    TCGv EA;
2630    if (Rc(ctx->opcode)) {
2631        if (unlikely(rA(ctx->opcode) == 0 ||
2632                     rA(ctx->opcode) == rD(ctx->opcode))) {
2633            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2634            return;
2635        }
2636    }
2637    gen_set_access_type(ctx, ACCESS_INT);
2638    EA = tcg_temp_new();
2639    gen_addr_imm_index(ctx, EA, 0x03);
2640    if (ctx->opcode & 0x02) {
2641        /* lwa (lwau is undefined) */
2642        gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2643    } else {
2644        /* ld - ldu */
2645        gen_qemu_ld64_i64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
2646    }
2647    if (Rc(ctx->opcode)) {
2648        tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
2649    }
2650    tcg_temp_free(EA);
2651}
2652
/* lq - load quadword into the even/odd GPR pair rd, rd+1 */
static void gen_lq(DisasContext *ctx)
{
    int ra, rd;
    TCGv EA, hi, lo;

    /* lq is a legal user mode instruction starting in ISA 2.07 */
    bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
    /* NOTE(review): derived from the same flag as above -- ISA 2.07
     * LSQ support also appears to enable the LE form; confirm. */
    bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;

    if (!legal_in_user_mode && ctx->pr) {
        gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
        return;
    }

    if (!le_is_supported && ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    ra = rA(ctx->opcode);
    rd = rD(ctx->opcode);
    /* Invalid form: odd rd, or rd == ra. */
    if (unlikely((rd & 1) || rd == ra)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_imm_index(ctx, EA, 0x0F);

    /* Note that the low part is always in RD+1, even in LE mode.  */
    lo = cpu_gpr[rd + 1];
    hi = cpu_gpr[rd];

    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
        if (HAVE_ATOMIC128) {
            /* Single atomic 16-byte load via helper; the high half
             * comes back through env->retxh. */
            TCGv_i32 oi = tcg_temp_new_i32();
            if (ctx->le_mode) {
                tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
                gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
            } else {
                tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
                gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
            }
            tcg_temp_free_i32(oi);
            tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
        } else {
            /* Restart with exclusive lock.  */
            gen_helper_exit_atomic(cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
    } else if (ctx->le_mode) {
        /* Non-parallel: two 8-byte loads, low doubleword first in LE. */
        tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ);
        gen_addr_add(ctx, EA, EA, 8);
        tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ);
    } else {
        /* BE: high doubleword is at the lower address. */
        tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ);
        gen_addr_add(ctx, EA, EA, 8);
        tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ);
    }
    tcg_temp_free(EA);
}
2715#endif
2716
2717/***                              Integer store                            ***/
/*
 * GEN_ST: D-form integer store.  EA = (rA|0) + SIMM; stores rS via
 * gen_qemu_<stop>().
 */
#define GEN_ST(name, stop, opc, type)                                         \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv EA;                                                                  \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_imm_index(ctx, EA, 0);                                           \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
2728
/*
 * GEN_STU: D-form store with update.  Stores rS, then writes the
 * effective address back into rA; rA == 0 is an invalid form.
 * Note the generated name is built from 'stop' (e.g. gen_st8u), not
 * from 'name' -- callers in the opcode tables rely on this.
 *
 * Fix: braced the if/else arms — QEMU coding style requires braces
 * on all conditional bodies; no behavior change.
 */
#define GEN_STU(name, stop, opc, type)                                        \
static void glue(gen_, stop##u)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    if (type == PPC_64B) {                                                    \
        gen_addr_imm_index(ctx, EA, 0x03);                                    \
    } else {                                                                  \
        gen_addr_imm_index(ctx, EA, 0);                                       \
    }                                                                         \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}
2747
/*
 * GEN_STUX: X-form indexed store with update.  EA = rA + rB; stores
 * rS and writes EA back into rA.  rA == 0 is an invalid form.
 */
#define GEN_STUX(name, stop, opc2, opc3, type)                                \
static void glue(gen_, name##ux)(DisasContext *ctx)                           \
{                                                                             \
    TCGv EA;                                                                  \
    if (unlikely(rA(ctx->opcode) == 0)) {                                     \
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);                   \
        return;                                                               \
    }                                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);                             \
    tcg_temp_free(EA);                                                        \
}
2763
/*
 * GEN_STX_E: X-form indexed store.  EA = (rA|0) + rB; 'chk' is a
 * privilege-check statement run first (CHK_NONE, CHK_HVRM, ...).
 */
#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk;                                                                      \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
/* Unchecked indexed store. */
#define GEN_STX(name, stop, opc2, opc3, type)                                 \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE)

/* Indexed store restricted to hypervisor real mode. */
#define GEN_STX_HVRM(name, stop, opc2, opc3, type)                            \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM)
2780
/*
 * GEN_STS: instantiate the full family of one store size:
 * D-form, D-form with update, indexed with update, and indexed.
 */
#define GEN_STS(name, stop, op, type)                                         \
GEN_ST(name, stop, op | 0x20, type);                                          \
GEN_STU(name, stop, op | 0x21, type);                                         \
GEN_STUX(name, stop, 0x17, op | 0x01, type);                                  \
GEN_STX(name, stop, 0x17, op | 0x00, type)

/* stb stbu stbux stbx */
GEN_STS(stb, st8, 0x06, PPC_INTEGER);
/* sth sthu sthux sthx */
GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
/* stw stwu stwux stwx */
GEN_STS(stw, st32, 0x04, PPC_INTEGER);
2793
/*
 * GEN_STEPX: store by external process ID.  Privileged (CHK_SV) and
 * performed through the dedicated PPC_TLB_EPID_STORE MMU index.
 * NOTE(review): the source register is fetched with rD(); rS and rD
 * presumably decode the same instruction field -- confirm.
 */
#define GEN_STEPX(name, stop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV;                                                                   \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_st_tl(                                                       \
        cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop);              \
    tcg_temp_free(EA);                                                        \
}

GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
#if defined(TARGET_PPC64)
GEN_STEPX(std, DEF_MEMOP(MO_Q), 0x1d, 0x04)
#endif
2813
2814#if defined(TARGET_PPC64)
/* stdux / stdx, plus the cache-inhibited (HV real mode) store variants. */
GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B);
GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B);
GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
2821
2822static void gen_std(DisasContext *ctx)
2823{
2824    int rs;
2825    TCGv EA;
2826
2827    rs = rS(ctx->opcode);
2828    if ((ctx->opcode & 0x3) == 0x2) { /* stq */
2829        bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
2830        bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
2831        TCGv hi, lo;
2832
2833        if (!(ctx->insns_flags & PPC_64BX)) {
2834            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2835        }
2836
2837        if (!legal_in_user_mode && ctx->pr) {
2838            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
2839            return;
2840        }
2841
2842        if (!le_is_supported && ctx->le_mode) {
2843            gen_align_no_le(ctx);
2844            return;
2845        }
2846
2847        if (unlikely(rs & 1)) {
2848            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2849            return;
2850        }
2851        gen_set_access_type(ctx, ACCESS_INT);
2852        EA = tcg_temp_new();
2853        gen_addr_imm_index(ctx, EA, 0x03);
2854
2855        /* Note that the low part is always in RS+1, even in LE mode.  */
2856        lo = cpu_gpr[rs + 1];
2857        hi = cpu_gpr[rs];
2858
2859        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2860            if (HAVE_ATOMIC128) {
2861                TCGv_i32 oi = tcg_temp_new_i32();
2862                if (ctx->le_mode) {
2863                    tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
2864                    gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi);
2865                } else {
2866                    tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
2867                    gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi);
2868                }
2869                tcg_temp_free_i32(oi);
2870            } else {
2871                /* Restart with exclusive lock.  */
2872                gen_helper_exit_atomic(cpu_env);
2873                ctx->base.is_jmp = DISAS_NORETURN;
2874            }
2875        } else if (ctx->le_mode) {
2876            tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_LEQ);
2877            gen_addr_add(ctx, EA, EA, 8);
2878            tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_LEQ);
2879        } else {
2880            tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_BEQ);
2881            gen_addr_add(ctx, EA, EA, 8);
2882            tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_BEQ);
2883        }
2884        tcg_temp_free(EA);
2885    } else {
2886        /* std / stdu */
2887        if (Rc(ctx->opcode)) {
2888            if (unlikely(rA(ctx->opcode) == 0)) {
2889                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
2890                return;
2891            }
2892        }
2893        gen_set_access_type(ctx, ACCESS_INT);
2894        EA = tcg_temp_new();
2895        gen_addr_imm_index(ctx, EA, 0x03);
2896        gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
2897        if (Rc(ctx->opcode)) {
2898            tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
2899        }
2900        tcg_temp_free(EA);
2901    }
2902}
2903#endif
2904/***                Integer load and store with byte reverse               ***/
2905
/* lhbrx - byte-reversed halfword load */
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);

/* lwbrx - byte-reversed word load */
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);

#if defined(TARGET_PPC64)
/* ldbrx - byte-reversed doubleword load (gated on PPC2_DBRX) */
GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
/* stdbrx - byte-reversed doubleword store (gated on PPC2_DBRX) */
GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
#endif  /* TARGET_PPC64 */

/* sthbrx - byte-reversed halfword store */
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
/* stwbrx - byte-reversed word store */
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
2923
2924/***                    Integer load and store multiple                    ***/
2925
2926/* lmw */
2927static void gen_lmw(DisasContext *ctx)
2928{
2929    TCGv t0;
2930    TCGv_i32 t1;
2931
2932    if (ctx->le_mode) {
2933        gen_align_no_le(ctx);
2934        return;
2935    }
2936    gen_set_access_type(ctx, ACCESS_INT);
2937    t0 = tcg_temp_new();
2938    t1 = tcg_const_i32(rD(ctx->opcode));
2939    gen_addr_imm_index(ctx, t0, 0);
2940    gen_helper_lmw(cpu_env, t0, t1);
2941    tcg_temp_free(t0);
2942    tcg_temp_free_i32(t1);
2943}
2944
2945/* stmw */
2946static void gen_stmw(DisasContext *ctx)
2947{
2948    TCGv t0;
2949    TCGv_i32 t1;
2950
2951    if (ctx->le_mode) {
2952        gen_align_no_le(ctx);
2953        return;
2954    }
2955    gen_set_access_type(ctx, ACCESS_INT);
2956    t0 = tcg_temp_new();
2957    t1 = tcg_const_i32(rS(ctx->opcode));
2958    gen_addr_imm_index(ctx, t0, 0);
2959    gen_helper_stmw(cpu_env, t0, t1);
2960    tcg_temp_free(t0);
2961    tcg_temp_free_i32(t1);
2962}
2963
2964/***                    Integer load and store strings                     ***/
2965
2966/* lswi */
2967/*
2968 * PowerPC32 specification says we must generate an exception if rA is
2969 * in the range of registers to be loaded.  In an other hand, IBM says
2970 * this is valid, but rA won't be loaded.  For now, I'll follow the
2971 * spec...
2972 */
2973static void gen_lswi(DisasContext *ctx)
2974{
2975    TCGv t0;
2976    TCGv_i32 t1, t2;
2977    int nb = NB(ctx->opcode);
2978    int start = rD(ctx->opcode);
2979    int ra = rA(ctx->opcode);
2980    int nr;
2981
2982    if (ctx->le_mode) {
2983        gen_align_no_le(ctx);
2984        return;
2985    }
2986    if (nb == 0) {
2987        nb = 32;
2988    }
2989    nr = DIV_ROUND_UP(nb, 4);
2990    if (unlikely(lsw_reg_in_range(start, nr, ra))) {
2991        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
2992        return;
2993    }
2994    gen_set_access_type(ctx, ACCESS_INT);
2995    t0 = tcg_temp_new();
2996    gen_addr_register(ctx, t0);
2997    t1 = tcg_const_i32(nb);
2998    t2 = tcg_const_i32(start);
2999    gen_helper_lsw(cpu_env, t0, t1, t2);
3000    tcg_temp_free(t0);
3001    tcg_temp_free_i32(t1);
3002    tcg_temp_free_i32(t2);
3003}
3004
3005/* lswx */
3006static void gen_lswx(DisasContext *ctx)
3007{
3008    TCGv t0;
3009    TCGv_i32 t1, t2, t3;
3010
3011    if (ctx->le_mode) {
3012        gen_align_no_le(ctx);
3013        return;
3014    }
3015    gen_set_access_type(ctx, ACCESS_INT);
3016    t0 = tcg_temp_new();
3017    gen_addr_reg_index(ctx, t0);
3018    t1 = tcg_const_i32(rD(ctx->opcode));
3019    t2 = tcg_const_i32(rA(ctx->opcode));
3020    t3 = tcg_const_i32(rB(ctx->opcode));
3021    gen_helper_lswx(cpu_env, t0, t1, t2, t3);
3022    tcg_temp_free(t0);
3023    tcg_temp_free_i32(t1);
3024    tcg_temp_free_i32(t2);
3025    tcg_temp_free_i32(t3);
3026}
3027
3028/* stswi */
3029static void gen_stswi(DisasContext *ctx)
3030{
3031    TCGv t0;
3032    TCGv_i32 t1, t2;
3033    int nb = NB(ctx->opcode);
3034
3035    if (ctx->le_mode) {
3036        gen_align_no_le(ctx);
3037        return;
3038    }
3039    gen_set_access_type(ctx, ACCESS_INT);
3040    t0 = tcg_temp_new();
3041    gen_addr_register(ctx, t0);
3042    if (nb == 0) {
3043        nb = 32;
3044    }
3045    t1 = tcg_const_i32(nb);
3046    t2 = tcg_const_i32(rS(ctx->opcode));
3047    gen_helper_stsw(cpu_env, t0, t1, t2);
3048    tcg_temp_free(t0);
3049    tcg_temp_free_i32(t1);
3050    tcg_temp_free_i32(t2);
3051}
3052
3053/* stswx */
3054static void gen_stswx(DisasContext *ctx)
3055{
3056    TCGv t0;
3057    TCGv_i32 t1, t2;
3058
3059    if (ctx->le_mode) {
3060        gen_align_no_le(ctx);
3061        return;
3062    }
3063    gen_set_access_type(ctx, ACCESS_INT);
3064    t0 = tcg_temp_new();
3065    gen_addr_reg_index(ctx, t0);
3066    t1 = tcg_temp_new_i32();
3067    tcg_gen_trunc_tl_i32(t1, cpu_xer);
3068    tcg_gen_andi_i32(t1, t1, 0x7F);
3069    t2 = tcg_const_i32(rS(ctx->opcode));
3070    gen_helper_stsw(cpu_env, t0, t1, t2);
3071    tcg_temp_free(t0);
3072    tcg_temp_free_i32(t1);
3073    tcg_temp_free_i32(t2);
3074}
3075
3076/***                        Memory synchronisation                         ***/
3077/* eieio */
3078static void gen_eieio(DisasContext *ctx)
3079{
3080    TCGBar bar = TCG_MO_LD_ST;
3081
3082    /*
3083     * POWER9 has a eieio instruction variant using bit 6 as a hint to
3084     * tell the CPU it is a store-forwarding barrier.
3085     */
3086    if (ctx->opcode & 0x2000000) {
3087        /*
3088         * ISA says that "Reserved fields in instructions are ignored
3089         * by the processor". So ignore the bit 6 on non-POWER9 CPU but
3090         * as this is not an instruction software should be using,
3091         * complain to the user.
3092         */
3093        if (!(ctx->insns_flags2 & PPC2_ISA300)) {
3094            qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @"
3095                          TARGET_FMT_lx "\n", ctx->base.pc_next - 4);
3096        } else {
3097            bar = TCG_MO_ST_LD;
3098        }
3099    }
3100
3101    tcg_gen_mb(bar | TCG_BAR_SC);
3102}
3103
#if !defined(CONFIG_USER_ONLY)
/*
 * If a lazy TLB flush is pending (env->tlb_need_flush != 0), emit a
 * call to the helper that performs it; 'global' selects the
 * global-scope flush.  No-op for CPUs without lazy TLB flushing.
 */
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
{
    TCGv_i32 t;
    TCGLabel *l;

    if (!ctx->lazy_tlb_flush) {
        return;
    }
    l = gen_new_label();
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
    /* Fast path: skip the helper call when no flush is pending. */
    tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
    if (global) {
        gen_helper_check_tlb_flush_global(cpu_env);
    } else {
        gen_helper_check_tlb_flush_local(cpu_env);
    }
    gen_set_label(l);
    tcg_temp_free_i32(t);
}
#else
/* User-mode emulation has no softmmu TLB to flush. */
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
#endif
3128
3129/* isync */
static void gen_isync(DisasContext *ctx)
{
    /*
     * We need to check for a pending TLB flush. This can only happen in
     * kernel mode however so check MSR_PR
     */
    if (!ctx->pr) {
        gen_check_tlb_flush(ctx, false);
    }
    /* Full barrier, then end the TB so the new context takes effect. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    gen_stop_exception(ctx);
}
3142
/* Access size in bytes encoded in a MemOp (1 << MO_SIZE field). */
#define MEMOP_GET_SIZE(x)  (1 << ((x) & MO_SIZE))

/*
 * Common body for the load-and-reserve instructions: perform an
 * aligned load into rD and record the reservation address and the
 * loaded value (cpu_reserve / cpu_reserve_val) for a later
 * store-conditional.
 */
static void gen_load_locked(DisasContext *ctx, MemOp memop)
{
    TCGv gpr = cpu_gpr[rD(ctx->opcode)];
    TCGv t0 = tcg_temp_new();

    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
    tcg_gen_mov_tl(cpu_reserve, t0);
    tcg_gen_mov_tl(cpu_reserve_val, gpr);
    /* Acquire barrier: order the reserving load before later accesses. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    tcg_temp_free(t0);
}
3158
/* LARX: define a gen_<name>() load-and-reserve wrapper of one size. */
#define LARX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_load_locked(ctx, memop);           \
}

/* lbarx / lharx / lwarx */
LARX(lbarx, DEF_MEMOP(MO_UB))
LARX(lharx, DEF_MEMOP(MO_UW))
LARX(lwarx, DEF_MEMOP(MO_UL))
3169
/*
 * Helper for the "fetch and increment/decrement" function codes of
 * lwat/ldat.  Loads t = mem(EA) and t2 = mem(EA + size); if
 * cond(t, t2) holds, stores t + addend back at EA, otherwise stores t
 * unchanged.  rD receives t when the condition held, else the
 * sentinel 1 << (size*8 - 1).  Callers only use this on the
 * non-CF_PARALLEL path (no atomicity is provided here).
 */
static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
                                      TCGv EA, TCGCond cond, int addend)
{
    TCGv t = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv u = tcg_temp_new();

    tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
    tcg_gen_addi_tl(t2, EA, MEMOP_GET_SIZE(memop));
    tcg_gen_qemu_ld_tl(t2, t2, ctx->mem_idx, memop);
    tcg_gen_addi_tl(u, t, addend);

    /* E.g. for fetch and increment bounded... */
    /* mem(EA,s) = (t != t2 ? u = t + 1 : t) */
    tcg_gen_movcond_tl(cond, u, t, t2, u, t);
    tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop);

    /* RT = (t != t2 ? t : u = 1<<(s*8-1)) */
    tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1));
    tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u);

    tcg_temp_free(t);
    tcg_temp_free(t2);
    tcg_temp_free(u);
}
3195
/*
 * Common body for the lwat/ldat atomic load-and-op instructions.
 * The FC field selects the operation; dst is RT and src is RT+1
 * (RT+2 supplies the swap value for "compare and swap not equal").
 * Most operations map to a single TCG atomic op; the ones that do not
 * are emitted inline when single-threaded, otherwise the TB restarts
 * under the exclusive lock.
 */
static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
{
    uint32_t gpr_FC = FC(ctx->opcode);
    TCGv EA = tcg_temp_new();
    int rt = rD(ctx->opcode);
    bool need_serial;
    TCGv src, dst;

    gen_addr_register(ctx, EA);
    dst = cpu_gpr[rt];
    src = cpu_gpr[(rt + 1) & 31];

    need_serial = false;
    memop |= MO_ALIGN;
    switch (gpr_FC) {
    case 0: /* Fetch and add */
        tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 1: /* Fetch and xor */
        tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 2: /* Fetch and or */
        tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 3: /* Fetch and 'and' */
        tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 4:  /* Fetch and max unsigned */
        tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 5:  /* Fetch and max signed */
        tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 6:  /* Fetch and min unsigned */
        tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 7:  /* Fetch and min signed */
        tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 8: /* Swap */
        tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
        break;

    case 16: /* Compare and swap not equal */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();

            tcg_gen_qemu_ld_tl(t0, EA, ctx->mem_idx, memop);
            /* For sub-64-bit ops on 64-bit targets, compare zero-extended. */
            if ((memop & MO_SIZE) == MO_64 || TARGET_LONG_BITS == 32) {
                tcg_gen_mov_tl(t1, src);
            } else {
                tcg_gen_ext32u_tl(t1, src);
            }
            /* Store RT+2 when mem != RT+1, else store the old value back. */
            tcg_gen_movcond_tl(TCG_COND_NE, t1, t0, t1,
                               cpu_gpr[(rt + 2) & 31], t0);
            tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop);
            tcg_gen_mov_tl(dst, t0);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        break;

    case 24: /* Fetch and increment bounded */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, 1);
        }
        break;
    case 25: /* Fetch and increment equal */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_EQ, 1);
        }
        break;
    case 28: /* Fetch and decrement bounded */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, -1);
        }
        break;

    default:
        /* invoke data storage error handler */
        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
    }
    tcg_temp_free(EA);

    if (need_serial) {
        /* Restart with exclusive lock.  */
        gen_helper_exit_atomic(cpu_env);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}
3296
/* lwat: load word atomic; FC field selects the atomic operation */
static void gen_lwat(DisasContext *ctx)
{
    gen_ld_atomic(ctx, DEF_MEMOP(MO_UL));
}
3301
3302#ifdef TARGET_PPC64
/* ldat: load doubleword atomic; FC field selects the atomic operation */
static void gen_ldat(DisasContext *ctx)
{
    gen_ld_atomic(ctx, DEF_MEMOP(MO_Q));
}
3307#endif
3308
/*
 * Common code for stwat/stdat: perform the read-modify-write selected
 * by the FC instruction field at the address in rA, discarding the
 * value fetched from memory.
 */
static void gen_st_atomic(DisasContext *ctx, MemOp memop)
{
    uint32_t gpr_FC = FC(ctx->opcode);
    TCGv EA = tcg_temp_new();
    TCGv src, discard;

    gen_addr_register(ctx, EA);
    src = cpu_gpr[rD(ctx->opcode)];
    discard = tcg_temp_new();

    /* All of these operations require a naturally aligned EA */
    memop |= MO_ALIGN;
    switch (gpr_FC) {
    case 0: /* add and Store */
        tcg_gen_atomic_add_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 1: /* xor and Store */
        tcg_gen_atomic_xor_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 2: /* Or and Store */
        tcg_gen_atomic_or_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 3: /* 'and' and Store */
        tcg_gen_atomic_and_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 4:  /* Store max unsigned */
        tcg_gen_atomic_umax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 5:  /* Store max signed */
        tcg_gen_atomic_smax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 6:  /* Store min unsigned */
        tcg_gen_atomic_umin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 7:  /* Store min signed */
        tcg_gen_atomic_smin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 24: /* Store twin  */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            /* Restart with exclusive lock.  */
            gen_helper_exit_atomic(cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        } else {
            TCGv t = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv s = tcg_temp_new();
            TCGv s2 = tcg_temp_new();
            TCGv ea_plus_s = tcg_temp_new();

            /*
             * Load both halves; if they are equal, both locations are
             * overwritten with src, otherwise the original values are
             * stored back (the movcond picks t/t2 on inequality).
             */
            tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
            tcg_gen_addi_tl(ea_plus_s, EA, MEMOP_GET_SIZE(memop));
            tcg_gen_qemu_ld_tl(t2, ea_plus_s, ctx->mem_idx, memop);
            tcg_gen_movcond_tl(TCG_COND_EQ, s, t, t2, src, t);
            tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2);
            tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop);
            tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop);

            tcg_temp_free(ea_plus_s);
            tcg_temp_free(s2);
            tcg_temp_free(s);
            tcg_temp_free(t2);
            tcg_temp_free(t);
        }
        break;
    default:
        /* invoke data storage error handler */
        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
    }
    tcg_temp_free(discard);
    tcg_temp_free(EA);
}
3379
/* stwat: store word atomic */
static void gen_stwat(DisasContext *ctx)
{
    gen_st_atomic(ctx, DEF_MEMOP(MO_UL));
}
3384
3385#ifdef TARGET_PPC64
/* stdat: store doubleword atomic */
static void gen_stdat(DisasContext *ctx)
{
    gen_st_atomic(ctx, DEF_MEMOP(MO_Q));
}
3390#endif
3391
/*
 * Common code for st{b,h,w,d}cx.: store conditional.  The store happens
 * only if the effective address matches the current reservation and the
 * reserved location still holds the value read by the matching larx;
 * CR0 is set to SO, plus EQ on success.  The reservation is cleared in
 * all cases.
 */
static void gen_conditional_store(DisasContext *ctx, MemOp memop)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    TCGv t0 = tcg_temp_new();
    int reg = rS(ctx->opcode);

    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, t0);
    /* Fail fast if EA does not match the reservation address */
    tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
    tcg_temp_free(t0);

    t0 = tcg_temp_new();
    /* Atomically store rS iff memory still holds the reserved value */
    tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val,
                              cpu_gpr[reg], ctx->mem_idx,
                              DEF_MEMOP(memop) | MO_ALIGN);
    tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_reserve_val);
    tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
    tcg_gen_or_tl(t0, t0, cpu_so);
    tcg_gen_trunc_tl_i32(cpu_crf[0], t0);
    tcg_temp_free(t0);
    tcg_gen_br(l2);

    gen_set_label(l1);

    /*
     * Address mismatch implies failure.  But we still need to provide
     * the memory barrier semantics of the instruction.
     */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);

    gen_set_label(l2);
    /* The reservation is cleared whether the store succeeded or not */
    tcg_gen_movi_tl(cpu_reserve, -1);
}
3427
/* Instantiate one store-conditional translator (stbcx., sthcx., ...) */
#define STCX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_conditional_store(ctx, memop);     \
}
3433
/* stbcx. */
STCX(stbcx_, DEF_MEMOP(MO_UB))
/* sthcx. */
STCX(sthcx_, DEF_MEMOP(MO_UW))
/* stwcx. */
STCX(stwcx_, DEF_MEMOP(MO_UL))

#if defined(TARGET_PPC64)
/* ldarx */
LARX(ldarx, DEF_MEMOP(MO_Q))
/* stdcx. */
STCX(stdcx_, DEF_MEMOP(MO_Q))
3443
/*
 * lqarx: load quadword and reserve.  rD must be even and must differ
 * from rA and rB; the low doubleword always goes to rD+1, even in LE
 * mode.
 */
static void gen_lqarx(DisasContext *ctx)
{
    int rd = rD(ctx->opcode);
    TCGv EA, hi, lo;

    if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
                 (rd == rB(ctx->opcode)))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    gen_set_access_type(ctx, ACCESS_RES);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);

    /* Note that the low part is always in RD+1, even in LE mode.  */
    lo = cpu_gpr[rd + 1];
    hi = cpu_gpr[rd];

    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
        if (HAVE_ATOMIC128) {
            TCGv_i32 oi = tcg_temp_new_i32();
            if (ctx->le_mode) {
                tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ | MO_ALIGN_16,
                                                    ctx->mem_idx));
                gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
            } else {
                tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ | MO_ALIGN_16,
                                                    ctx->mem_idx));
                gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
            }
            tcg_temp_free_i32(oi);
            /* The helper delivers the second doubleword via env->retxh */
            tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
        } else {
            /* Restart with exclusive lock.  */
            gen_helper_exit_atomic(cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
            tcg_temp_free(EA);
            return;
        }
    } else if (ctx->le_mode) {
        tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ | MO_ALIGN_16);
        tcg_gen_mov_tl(cpu_reserve, EA);
        gen_addr_add(ctx, EA, EA, 8);
        tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ);
    } else {
        tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ | MO_ALIGN_16);
        tcg_gen_mov_tl(cpu_reserve, EA);
        gen_addr_add(ctx, EA, EA, 8);
        tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ);
    }
    tcg_temp_free(EA);

    /* Record both halves so stqcx. can check the full 128-bit value */
    tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val));
    tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2));
}
3501
/*
 * stqcx.: store quadword conditional.  rS must be even; the low
 * doubleword is always taken from RS+1, even in LE mode.  CR0 reports
 * SO plus EQ on success, mirroring gen_conditional_store().
 */
static void gen_stqcx_(DisasContext *ctx)
{
    int rs = rS(ctx->opcode);
    TCGv EA, hi, lo;

    if (unlikely(rs & 1)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    gen_set_access_type(ctx, ACCESS_RES);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);

    /* Note that the low part is always in RS+1, even in LE mode.  */
    lo = cpu_gpr[rs + 1];
    hi = cpu_gpr[rs];

    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
        if (HAVE_CMPXCHG128) {
            TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_Q) | MO_ALIGN_16);
            if (ctx->le_mode) {
                gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env,
                                             EA, lo, hi, oi);
            } else {
                gen_helper_stqcx_be_parallel(cpu_crf[0], cpu_env,
                                             EA, lo, hi, oi);
            }
            tcg_temp_free_i32(oi);
        } else {
            /* Restart with exclusive lock.  */
            gen_helper_exit_atomic(cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
        tcg_temp_free(EA);
    } else {
        TCGLabel *lab_fail = gen_new_label();
        TCGLabel *lab_over = gen_new_label();
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        /* Fail if EA does not match the reservation address */
        tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lab_fail);
        tcg_temp_free(EA);

        /* Compare both doublewords against the values saved by lqarx */
        gen_qemu_ld64_i64(ctx, t0, cpu_reserve);
        tcg_gen_ld_i64(t1, cpu_env, (ctx->le_mode
                                     ? offsetof(CPUPPCState, reserve_val2)
                                     : offsetof(CPUPPCState, reserve_val)));
        tcg_gen_brcond_i64(TCG_COND_NE, t0, t1, lab_fail);

        tcg_gen_addi_i64(t0, cpu_reserve, 8);
        gen_qemu_ld64_i64(ctx, t0, t0);
        tcg_gen_ld_i64(t1, cpu_env, (ctx->le_mode
                                     ? offsetof(CPUPPCState, reserve_val)
                                     : offsetof(CPUPPCState, reserve_val2)));
        tcg_gen_brcond_i64(TCG_COND_NE, t0, t1, lab_fail);

        /* Success */
        gen_qemu_st64_i64(ctx, ctx->le_mode ? lo : hi, cpu_reserve);
        tcg_gen_addi_i64(t0, cpu_reserve, 8);
        gen_qemu_st64_i64(ctx, ctx->le_mode ? hi : lo, t0);

        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ);
        tcg_gen_br(lab_over);

        gen_set_label(lab_fail);
        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);

        gen_set_label(lab_over);
        /* The reservation is always cleared */
        tcg_gen_movi_tl(cpu_reserve, -1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}
3578#endif /* defined(TARGET_PPC64) */
3579
/* sync */
static void gen_sync(DisasContext *ctx)
{
    uint32_t l = (ctx->opcode >> 21) & 3;

    /*
     * We may need to check for a pending TLB flush.
     *
     * We do this on ptesync (l == 2) on ppc64 and any sync on ppc32.
     *
     * Additionally, this can only happen in kernel mode however so
     * check MSR_PR as well.
     */
    if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
        gen_check_tlb_flush(ctx, true);
    }
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
}
3598
3599/* wait */
3600static void gen_wait(DisasContext *ctx)
3601{
3602    TCGv_i32 t0 = tcg_const_i32(1);
3603    tcg_gen_st_i32(t0, cpu_env,
3604                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
3605    tcg_temp_free_i32(t0);
3606    /* Stop translation, as the CPU is supposed to sleep from now */
3607    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
3608}
3609
3610#if defined(TARGET_PPC64)
3611static void gen_doze(DisasContext *ctx)
3612{
3613#if defined(CONFIG_USER_ONLY)
3614    GEN_PRIV;
3615#else
3616    TCGv_i32 t;
3617
3618    CHK_HV;
3619    t = tcg_const_i32(PPC_PM_DOZE);
3620    gen_helper_pminsn(cpu_env, t);
3621    tcg_temp_free_i32(t);
3622    /* Stop translation, as the CPU is supposed to sleep from now */
3623    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
3624#endif /* defined(CONFIG_USER_ONLY) */
3625}
3626
3627static void gen_nap(DisasContext *ctx)
3628{
3629#if defined(CONFIG_USER_ONLY)
3630    GEN_PRIV;
3631#else
3632    TCGv_i32 t;
3633
3634    CHK_HV;
3635    t = tcg_const_i32(PPC_PM_NAP);
3636    gen_helper_pminsn(cpu_env, t);
3637    tcg_temp_free_i32(t);
3638    /* Stop translation, as the CPU is supposed to sleep from now */
3639    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
3640#endif /* defined(CONFIG_USER_ONLY) */
3641}
3642
3643static void gen_stop(DisasContext *ctx)
3644{
3645#if defined(CONFIG_USER_ONLY)
3646    GEN_PRIV;
3647#else
3648    TCGv_i32 t;
3649
3650    CHK_HV;
3651    t = tcg_const_i32(PPC_PM_STOP);
3652    gen_helper_pminsn(cpu_env, t);
3653    tcg_temp_free_i32(t);
3654    /* Stop translation, as the CPU is supposed to sleep from now */
3655    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
3656#endif /* defined(CONFIG_USER_ONLY) */
3657}
3658
3659static void gen_sleep(DisasContext *ctx)
3660{
3661#if defined(CONFIG_USER_ONLY)
3662    GEN_PRIV;
3663#else
3664    TCGv_i32 t;
3665
3666    CHK_HV;
3667    t = tcg_const_i32(PPC_PM_SLEEP);
3668    gen_helper_pminsn(cpu_env, t);
3669    tcg_temp_free_i32(t);
3670    /* Stop translation, as the CPU is supposed to sleep from now */
3671    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
3672#endif /* defined(CONFIG_USER_ONLY) */
3673}
3674
3675static void gen_rvwinkle(DisasContext *ctx)
3676{
3677#if defined(CONFIG_USER_ONLY)
3678    GEN_PRIV;
3679#else
3680    TCGv_i32 t;
3681
3682    CHK_HV;
3683    t = tcg_const_i32(PPC_PM_RVWINKLE);
3684    gen_helper_pminsn(cpu_env, t);
3685    tcg_temp_free_i32(t);
3686    /* Stop translation, as the CPU is supposed to sleep from now */
3687    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
3688#endif /* defined(CONFIG_USER_ONLY) */
3689}
3690#endif /* #if defined(TARGET_PPC64) */
3691
/* Record the branch address in CFAR (Come-From Address Register) */
static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip)
{
#if defined(TARGET_PPC64)
    if (ctx->has_cfar) {
        tcg_gen_movi_tl(cpu_cfar, nip);
    }
#endif
}
3700
3701static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
3702{
3703    if (unlikely(ctx->singlestep_enabled)) {
3704        return false;
3705    }
3706
3707#ifndef CONFIG_USER_ONLY
3708    return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
3709#else
3710    return true;
3711#endif
3712}
3713
3714static void gen_lookup_and_goto_ptr(DisasContext *ctx)
3715{
3716    int sse = ctx->singlestep_enabled;
3717    if (unlikely(sse)) {
3718        if (sse & GDBSTUB_SINGLE_STEP) {
3719            gen_debug_exception(ctx);
3720        } else if (sse & (CPU_SINGLE_STEP | CPU_BRANCH_STEP)) {
3721            uint32_t excp = gen_prep_dbgex(ctx);
3722            gen_exception(ctx, excp);
3723        }
3724        tcg_gen_exit_tb(NULL, 0);
3725    } else {
3726        tcg_gen_lookup_and_goto_ptr();
3727    }
3728}
3729
3730/***                                Branch                                 ***/
3731static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
3732{
3733    if (NARROW_MODE(ctx)) {
3734        dest = (uint32_t) dest;
3735    }
3736    if (use_goto_tb(ctx, dest)) {
3737        tcg_gen_goto_tb(n);
3738        tcg_gen_movi_tl(cpu_nip, dest & ~3);
3739        tcg_gen_exit_tb(ctx->base.tb, n);
3740    } else {
3741        tcg_gen_movi_tl(cpu_nip, dest & ~3);
3742        gen_lookup_and_goto_ptr(ctx);
3743    }
3744}
3745
3746static inline void gen_setlr(DisasContext *ctx, target_ulong nip)
3747{
3748    if (NARROW_MODE(ctx)) {
3749        nip = (uint32_t)nip;
3750    }
3751    tcg_gen_movi_tl(cpu_lr, nip);
3752}
3753
3754/* b ba bl bla */
3755static void gen_b(DisasContext *ctx)
3756{
3757    target_ulong li, target;
3758
3759    ctx->exception = POWERPC_EXCP_BRANCH;
3760    /* sign extend LI */
3761    li = LI(ctx->opcode);
3762    li = (li ^ 0x02000000) - 0x02000000;
3763    if (likely(AA(ctx->opcode) == 0)) {
3764        target = ctx->base.pc_next + li - 4;
3765    } else {
3766        target = li;
3767    }
3768    if (LK(ctx->opcode)) {
3769        gen_setlr(ctx, ctx->base.pc_next);
3770    }
3771    gen_update_cfar(ctx, ctx->base.pc_next - 4);
3772    gen_goto_tb(ctx, 0, target);
3773}
3774
#define BCOND_IM  0
#define BCOND_LR  1
#define BCOND_CTR 2
#define BCOND_TAR 3

/*
 * Common code for all conditional branches (bc/bclr/bcctr/bctar).
 * Depending on the BO field this decrements and tests CTR and/or tests
 * a CR bit; on the not-taken path it falls through to the next insn.
 */
static void gen_bcond(DisasContext *ctx, int type)
{
    uint32_t bo = BO(ctx->opcode);
    TCGLabel *l1;
    TCGv target;
    ctx->exception = POWERPC_EXCP_BRANCH;

    if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
        /* Local temp: it must survive the brcond below */
        target = tcg_temp_local_new();
        if (type == BCOND_CTR) {
            tcg_gen_mov_tl(target, cpu_ctr);
        } else if (type == BCOND_TAR) {
            gen_load_spr(target, SPR_TAR);
        } else {
            tcg_gen_mov_tl(target, cpu_lr);
        }
    } else {
        target = NULL;
    }
    if (LK(ctx->opcode)) {
        gen_setlr(ctx, ctx->base.pc_next);
    }
    l1 = gen_new_label();
    if ((bo & 0x4) == 0) {
        /* Decrement and test CTR */
        TCGv temp = tcg_temp_new();

        if (type == BCOND_CTR) {
            /*
             * All ISAs up to v3 describe this form of bcctr as invalid but
             * some processors, ie. 64-bit server processors compliant with
             * arch 2.x, do implement a "test and decrement" logic instead,
             * as described in their respective UMs. This logic involves CTR
             * to act as both the branch target and a counter, which makes
             * it basically useless and thus never used in real code.
             *
             * This form was hence chosen to trigger extra micro-architectural
             * side-effect on real HW needed for the Spectre v2 workaround.
             * It is up to guests that implement such workaround, ie. linux, to
             * use this form in a way it just triggers the side-effect without
             * doing anything else harmful.
             */
            if (unlikely(!is_book3s_arch2x(ctx))) {
                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
                tcg_temp_free(temp);
                tcg_temp_free(target);
                return;
            }

            /* Test first, then decrement ("test and decrement" form) */
            if (NARROW_MODE(ctx)) {
                tcg_gen_ext32u_tl(temp, cpu_ctr);
            } else {
                tcg_gen_mov_tl(temp, cpu_ctr);
            }
            if (bo & 0x2) {
                tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
            } else {
                tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
            }
            tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
        } else {
            /* Normal form: decrement CTR, then test it */
            tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
            if (NARROW_MODE(ctx)) {
                tcg_gen_ext32u_tl(temp, cpu_ctr);
            } else {
                tcg_gen_mov_tl(temp, cpu_ctr);
            }
            if (bo & 0x2) {
                tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
            } else {
                tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
            }
        }
        tcg_temp_free(temp);
    }
    if ((bo & 0x10) == 0) {
        /* Test CR */
        uint32_t bi = BI(ctx->opcode);
        uint32_t mask = 0x08 >> (bi & 0x03);
        TCGv_i32 temp = tcg_temp_new_i32();

        if (bo & 0x8) {
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1);
        } else {
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
        }
        tcg_temp_free_i32(temp);
    }
    gen_update_cfar(ctx, ctx->base.pc_next - 4);
    if (type == BCOND_IM) {
        target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
        if (likely(AA(ctx->opcode) == 0)) {
            gen_goto_tb(ctx, 0, ctx->base.pc_next + li - 4);
        } else {
            gen_goto_tb(ctx, 0, li);
        }
    } else {
        /* Register branch target: mask off the two low address bits */
        if (NARROW_MODE(ctx)) {
            tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
        } else {
            tcg_gen_andi_tl(cpu_nip, target, ~3);
        }
        gen_lookup_and_goto_ptr(ctx);
        tcg_temp_free(target);
    }
    if ((bo & 0x14) != 0x14) {
        /* fallthrough case */
        gen_set_label(l1);
        gen_goto_tb(ctx, 1, ctx->base.pc_next);
    }
}
3893
/* bc: conditional branch with immediate displacement */
static void gen_bc(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_IM);
}

/* bcctr: conditional branch to CTR */
static void gen_bcctr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_CTR);
}

/* bclr: conditional branch to LR */
static void gen_bclr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_LR);
}

/* bctar: conditional branch to TAR */
static void gen_bctar(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_TAR);
}
3913
3914/***                      Condition register logical                       ***/
/*
 * Generate one CR-bit logical op.  CR bits are stored four per field in
 * cpu_crf[]; each source bit is shifted to the destination bit position,
 * combined with tcg_op, masked down to the single destination bit and
 * merged back into the destination field.
 */
#define GEN_CRLOGIC(name, tcg_op, opc)                                        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    uint8_t bitmask;                                                          \
    int sh;                                                                   \
    TCGv_i32 t0, t1;                                                          \
    sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03);             \
    t0 = tcg_temp_new_i32();                                                  \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2]);                 \
    t1 = tcg_temp_new_i32();                                                  \
    sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03);             \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2]);                 \
    tcg_op(t0, t0, t1);                                                       \
    bitmask = 0x08 >> (crbD(ctx->opcode) & 0x03);                             \
    tcg_gen_andi_i32(t0, t0, bitmask);                                        \
    tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask);          \
    tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1);                  \
    tcg_temp_free_i32(t0);                                                    \
    tcg_temp_free_i32(t1);                                                    \
}

/* crand */
GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08);
/* crandc */
GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04);
/* creqv */
GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09);
/* crnand */
GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07);
/* crnor */
GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01);
/* cror */
GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E);
/* crorc */
GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D);
/* crxor */
GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06);
3962
/* mcrf: copy condition register field crfS into crfD */
static void gen_mcrf(DisasContext *ctx)
{
    tcg_gen_mov_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]);
}
3968
3969/***                           System linkage                              ***/
3970
/* rfi (supervisor only) */
static void gen_rfi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV;
#else
    /*
     * This instruction doesn't exist anymore on 64-bit server
     * processors compliant with arch 2.x
     */
    if (is_book3s_arch2x(ctx)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    /* Restore CPU state */
    CHK_SV;
    /* The helper may touch timers/interrupts: bracket it for icount */
    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_update_cfar(ctx, ctx->base.pc_next - 4);
    gen_helper_rfi(cpu_env);
    gen_sync_exception(ctx);
#endif
}
3995
3996#if defined(TARGET_PPC64)
/* rfid (supervisor only, 64-bit) */
static void gen_rfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV;
#else
    /* Restore CPU state */
    CHK_SV;
    /* The helper may touch timers/interrupts: bracket it for icount */
    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_update_cfar(ctx, ctx->base.pc_next - 4);
    gen_helper_rfid(cpu_env);
    gen_sync_exception(ctx);
#endif
}
4012
/* hrfid (hypervisor only) */
static void gen_hrfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV;
#else
    /* Restore CPU state */
    CHK_HV;
    gen_helper_hrfid(cpu_env);
    gen_sync_exception(ctx);
#endif
}
4024#endif
4025
4026/* sc */
4027#if defined(CONFIG_USER_ONLY)
4028#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL_USER
4029#else
4030#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL
4031#endif
4032static void gen_sc(DisasContext *ctx)
4033{
4034    uint32_t lev;
4035
4036    lev = (ctx->opcode >> 5) & 0x7F;
4037    gen_exception_err(ctx, POWERPC_SYSCALL, lev);
4038}
4039
4040/***                                Trap                                   ***/
4041
4042/* Check for unconditional traps (always or never) */
4043static bool check_unconditional_trap(DisasContext *ctx)
4044{
4045    /* Trap never */
4046    if (TO(ctx->opcode) == 0) {
4047        return true;
4048    }
4049    /* Trap always */
4050    if (TO(ctx->opcode) == 31) {
4051        gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
4052        return true;
4053    }
4054    return false;
4055}
4056
4057/* tw */
4058static void gen_tw(DisasContext *ctx)
4059{
4060    TCGv_i32 t0;
4061
4062    if (check_unconditional_trap(ctx)) {
4063        return;
4064    }
4065    t0 = tcg_const_i32(TO(ctx->opcode));
4066    gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
4067                  t0);
4068    tcg_temp_free_i32(t0);
4069}
4070
4071/* twi */
4072static void gen_twi(DisasContext *ctx)
4073{
4074    TCGv t0;
4075    TCGv_i32 t1;
4076
4077    if (check_unconditional_trap(ctx)) {
4078        return;
4079    }
4080    t0 = tcg_const_tl(SIMM(ctx->opcode));
4081    t1 = tcg_const_i32(TO(ctx->opcode));
4082    gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
4083    tcg_temp_free(t0);
4084    tcg_temp_free_i32(t1);
4085}
4086
4087#if defined(TARGET_PPC64)
4088/* td */
4089static void gen_td(DisasContext *ctx)
4090{
4091    TCGv_i32 t0;
4092
4093    if (check_unconditional_trap(ctx)) {
4094        return;
4095    }
4096    t0 = tcg_const_i32(TO(ctx->opcode));
4097    gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
4098                  t0);
4099    tcg_temp_free_i32(t0);
4100}
4101
4102/* tdi */
4103static void gen_tdi(DisasContext *ctx)
4104{
4105    TCGv t0;
4106    TCGv_i32 t1;
4107
4108    if (check_unconditional_trap(ctx)) {
4109        return;
4110    }
4111    t0 = tcg_const_tl(SIMM(ctx->opcode));
4112    t1 = tcg_const_i32(TO(ctx->opcode));
4113    gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
4114    tcg_temp_free(t0);
4115    tcg_temp_free_i32(t1);
4116}
4117#endif
4118
4119/***                          Processor control                            ***/
4120
4121static void gen_read_xer(DisasContext *ctx, TCGv dst)
4122{
4123    TCGv t0 = tcg_temp_new();
4124    TCGv t1 = tcg_temp_new();
4125    TCGv t2 = tcg_temp_new();
4126    tcg_gen_mov_tl(dst, cpu_xer);
4127    tcg_gen_shli_tl(t0, cpu_so, XER_SO);
4128    tcg_gen_shli_tl(t1, cpu_ov, XER_OV);
4129    tcg_gen_shli_tl(t2, cpu_ca, XER_CA);
4130    tcg_gen_or_tl(t0, t0, t1);
4131    tcg_gen_or_tl(dst, dst, t2);
4132    tcg_gen_or_tl(dst, dst, t0);
4133    if (is_isa300(ctx)) {
4134        tcg_gen_shli_tl(t0, cpu_ov32, XER_OV32);
4135        tcg_gen_or_tl(dst, dst, t0);
4136        tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32);
4137        tcg_gen_or_tl(dst, dst, t0);
4138    }
4139    tcg_temp_free(t0);
4140    tcg_temp_free(t1);
4141    tcg_temp_free(t2);
4142}
4143
/* Scatter a full XER value into cpu_xer plus the split-out flag vars */
static void gen_write_xer(TCGv src)
{
    /* Write all flags, while reading back check for isa300 */
    tcg_gen_andi_tl(cpu_xer, src,
                    ~((1u << XER_SO) |
                      (1u << XER_OV) | (1u << XER_OV32) |
                      (1u << XER_CA) | (1u << XER_CA32)));
    tcg_gen_extract_tl(cpu_ov32, src, XER_OV32, 1);
    tcg_gen_extract_tl(cpu_ca32, src, XER_CA32, 1);
    tcg_gen_extract_tl(cpu_so, src, XER_SO, 1);
    tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1);
    tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1);
}
4157
/* mcrxr: move SO/OV/CA into CR field crfD and clear them in XER */
static void gen_mcrxr(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(t0, cpu_so);
    tcg_gen_trunc_tl_i32(t1, cpu_ov);
    tcg_gen_trunc_tl_i32(dst, cpu_ca);
    /* Pack as SO:OV:CA:0 in CR bit order */
    tcg_gen_shli_i32(t0, t0, 3);
    tcg_gen_shli_i32(t1, t1, 2);
    tcg_gen_shli_i32(dst, dst, 1);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_or_i32(dst, dst, t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);

    /* The moved bits are cleared in XER */
    tcg_gen_movi_tl(cpu_so, 0);
    tcg_gen_movi_tl(cpu_ov, 0);
    tcg_gen_movi_tl(cpu_ca, 0);
}
4180
4181#ifdef TARGET_PPC64
/* mcrxrx: pack OV:OV32:CA:CA32 into CR field crfD (XER is unchanged) */
static void gen_mcrxrx(DisasContext *ctx)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    /* copy OV and OV32 */
    tcg_gen_shli_tl(t0, cpu_ov, 1);
    tcg_gen_or_tl(t0, t0, cpu_ov32);
    tcg_gen_shli_tl(t0, t0, 2);
    /* copy CA and CA32 */
    tcg_gen_shli_tl(t1, cpu_ca, 1);
    tcg_gen_or_tl(t1, t1, cpu_ca32);
    tcg_gen_or_tl(t0, t0, t1);
    tcg_gen_trunc_tl_i32(dst, t0);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
4201#endif
4202
4203/* mfcr mfocrf */
4204static void gen_mfcr(DisasContext *ctx)
4205{
4206    uint32_t crm, crn;
4207
4208    if (likely(ctx->opcode & 0x00100000)) {
4209        crm = CRM(ctx->opcode);
4210        if (likely(crm && ((crm & (crm - 1)) == 0))) {
4211            crn = ctz32(crm);
4212            tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]);
4213            tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)],
4214                            cpu_gpr[rD(ctx->opcode)], crn * 4);
4215        }
4216    } else {
4217        TCGv_i32 t0 = tcg_temp_new_i32();
4218        tcg_gen_mov_i32(t0, cpu_crf[0]);
4219        tcg_gen_shli_i32(t0, t0, 4);
4220        tcg_gen_or_i32(t0, t0, cpu_crf[1]);
4221        tcg_gen_shli_i32(t0, t0, 4);
4222        tcg_gen_or_i32(t0, t0, cpu_crf[2]);
4223        tcg_gen_shli_i32(t0, t0, 4);
4224        tcg_gen_or_i32(t0, t0, cpu_crf[3]);
4225        tcg_gen_shli_i32(t0, t0, 4);
4226        tcg_gen_or_i32(t0, t0, cpu_crf[4]);
4227        tcg_gen_shli_i32(t0, t0, 4);
4228        tcg_gen_or_i32(t0, t0, cpu_crf[5]);
4229        tcg_gen_shli_i32(t0, t0, 4);
4230        tcg_gen_or_i32(t0, t0, cpu_crf[6]);
4231        tcg_gen_shli_i32(t0, t0, 4);
4232        tcg_gen_or_i32(t0, t0, cpu_crf[7]);
4233        tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
4234        tcg_temp_free_i32(t0);
4235    }
4236}
4237
/* mfmsr: supervisor-only copy of the MSR into rD */
static void gen_mfmsr(DisasContext *ctx)
{
    CHK_SV;
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr);
}
4244
/*
 * Sentinel read/write callback for SPRs that exist but may not be
 * accessed at the current privilege level; gen_op_mfspr/gen_mtspr
 * compare the callback pointer against SPR_NOACCESS to detect it.
 */
static void spr_noaccess(DisasContext *ctx, int gprn, int sprn)
{
#if 0
    /* Debug aid: un-swizzle the SPR number and report the access. */
    sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
    printf("ERROR: try to access SPR %d !\n", sprn);
#endif
}
#define SPR_NOACCESS (&spr_noaccess)
4253
/* mfspr: common body for mfspr and mftb */
static inline void gen_op_mfspr(DisasContext *ctx)
{
    void (*read_cb)(DisasContext *ctx, int gprn, int sprn);
    uint32_t sprn = SPR(ctx->opcode);

    /*
     * Select the per-SPR read callback matching the current privilege
     * level: user (problem state), hypervisor, or supervisor.
     */
#if defined(CONFIG_USER_ONLY)
    read_cb = ctx->spr_cb[sprn].uea_read;
#else
    if (ctx->pr) {
        read_cb = ctx->spr_cb[sprn].uea_read;
    } else if (ctx->hv) {
        read_cb = ctx->spr_cb[sprn].hea_read;
    } else {
        read_cb = ctx->spr_cb[sprn].oea_read;
    }
#endif
    if (likely(read_cb != NULL)) {
        if (likely(read_cb != SPR_NOACCESS)) {
            (*read_cb)(ctx, rD(ctx->opcode), sprn);
        } else {
            /* Privilege exception */
            /*
             * This is a hack to avoid warnings when running Linux:
             * this OS breaks the PowerPC virtualisation model,
             * allowing userland application to read the PVR
             */
            if (sprn != SPR_PVR) {
                qemu_log_mask(LOG_GUEST_ERROR, "Trying to read privileged spr "
                              "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                              ctx->base.pc_next - 4);
            }
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }
        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to read invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4);

        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
            }
        } else {
            if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
            }
        }
    }
}
4315
/* mfspr: thin wrapper, all of the work is in gen_op_mfspr() */
static void gen_mfspr(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}
4320
/* mftb: the time-base registers are reached via the SPR callbacks too */
static void gen_mftb(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}
4326
4327/* mtcrf mtocrf*/
4328static void gen_mtcrf(DisasContext *ctx)
4329{
4330    uint32_t crm, crn;
4331
4332    crm = CRM(ctx->opcode);
4333    if (likely((ctx->opcode & 0x00100000))) {
4334        if (crm && ((crm & (crm - 1)) == 0)) {
4335            TCGv_i32 temp = tcg_temp_new_i32();
4336            crn = ctz32(crm);
4337            tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
4338            tcg_gen_shri_i32(temp, temp, crn * 4);
4339            tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf);
4340            tcg_temp_free_i32(temp);
4341        }
4342    } else {
4343        TCGv_i32 temp = tcg_temp_new_i32();
4344        tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
4345        for (crn = 0 ; crn < 8 ; crn++) {
4346            if (crm & (1 << crn)) {
4347                    tcg_gen_shri_i32(cpu_crf[7 - crn], temp, crn * 4);
4348                    tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf);
4349            }
4350        }
4351        tcg_temp_free_i32(temp);
4352    }
4353}
4354
4355/* mtmsr */
4356#if defined(TARGET_PPC64)
/* mtmsrd: supervisor-only 64-bit MSR update */
static void gen_mtmsrd(DisasContext *ctx)
{
    CHK_SV;

#if !defined(CONFIG_USER_ONLY)
    if (ctx->opcode & 0x00010000) {
        /*
         * L=1 form: only MSR[RI] and MSR[EE] change, so no
         * synchronisation with the rest of the machine state is needed
         * and the MSR can be updated inline.
         */
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)],
                        (1 << MSR_RI) | (1 << MSR_EE));
        tcg_gen_andi_tl(cpu_msr, cpu_msr,
                        ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE)));
        tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
        tcg_temp_free(t0);
    } else {
        /*
         * XXX: we need to update nip before the store if we enter
         *      power saving mode, we will exit the loop directly from
         *      ppc_store_msr
         */
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_update_nip(ctx, ctx->base.pc_next);
        gen_helper_store_msr(cpu_env, cpu_gpr[rS(ctx->opcode)]);
        /* Must stop the translation as machine state (may have) changed */
        /* Note that mtmsr is not always defined as context-synchronizing */
        gen_stop_exception(ctx);
    }
#endif /* !defined(CONFIG_USER_ONLY) */
}
4388#endif /* defined(TARGET_PPC64) */
4389
4390static void gen_mtmsr(DisasContext *ctx)
4391{
4392    CHK_SV;
4393
4394#if !defined(CONFIG_USER_ONLY)
4395   if (ctx->opcode & 0x00010000) {
4396        /* Special form that does not need any synchronisation */
4397        TCGv t0 = tcg_temp_new();
4398        tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)],
4399                        (1 << MSR_RI) | (1 << MSR_EE));
4400        tcg_gen_andi_tl(cpu_msr, cpu_msr,
4401                        ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE)));
4402        tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
4403        tcg_temp_free(t0);
4404    } else {
4405        TCGv msr = tcg_temp_new();
4406
4407        /*
4408         * XXX: we need to update nip before the store if we enter
4409         *      power saving mode, we will exit the loop directly from
4410         *      ppc_store_msr
4411         */
4412        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
4413            gen_io_start();
4414        }
4415        gen_update_nip(ctx, ctx->base.pc_next);
4416#if defined(TARGET_PPC64)
4417        tcg_gen_deposit_tl(msr, cpu_msr, cpu_gpr[rS(ctx->opcode)], 0, 32);
4418#else
4419        tcg_gen_mov_tl(msr, cpu_gpr[rS(ctx->opcode)]);
4420#endif
4421        gen_helper_store_msr(cpu_env, msr);
4422        tcg_temp_free(msr);
4423        /* Must stop the translation as machine state (may have) changed */
4424        /* Note that mtmsr is not always defined as context-synchronizing */
4425        gen_stop_exception(ctx);
4426    }
4427#endif
4428}
4429
/* mtspr */
static void gen_mtspr(DisasContext *ctx)
{
    void (*write_cb)(DisasContext *ctx, int sprn, int gprn);
    uint32_t sprn = SPR(ctx->opcode);

    /*
     * Select the per-SPR write callback matching the current privilege
     * level: user (problem state), hypervisor, or supervisor.
     */
#if defined(CONFIG_USER_ONLY)
    write_cb = ctx->spr_cb[sprn].uea_write;
#else
    if (ctx->pr) {
        write_cb = ctx->spr_cb[sprn].uea_write;
    } else if (ctx->hv) {
        write_cb = ctx->spr_cb[sprn].hea_write;
    } else {
        write_cb = ctx->spr_cb[sprn].oea_write;
    }
#endif
    if (likely(write_cb != NULL)) {
        if (likely(write_cb != SPR_NOACCESS)) {
            (*write_cb)(ctx, sprn, rS(ctx->opcode));
        } else {
            /* Privilege exception */
            qemu_log_mask(LOG_GUEST_ERROR, "Trying to write privileged spr "
                          "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                          ctx->base.pc_next - 4);
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }

        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to write invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4);


        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
            }
        } else {
            if (ctx->pr || sprn == 0) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
            }
        }
    }
}
4486
4487#if defined(TARGET_PPC64)
/* setb: rD = -1 if CR[crfS].LT, 1 if CR[crfS].GT, else 0 */
static void gen_setb(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t8 = tcg_temp_new_i32();
    TCGv_i32 tm1 = tcg_temp_new_i32();
    int crf = crfS(ctx->opcode);

    /*
     * cpu_crf[crf] holds the 4-bit field LT:GT:EQ:SO.  As an unsigned
     * value it is >= 8 exactly when LT is set, and in [4,7] when GT
     * (but not LT) is set, which the two unsigned compares exploit.
     */
    tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4); /* GT or LT -> 1 */
    tcg_gen_movi_i32(t8, 8);
    tcg_gen_movi_i32(tm1, -1);
    tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0); /* LT -> -1 */
    tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);

    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t8);
    tcg_temp_free_i32(tm1);
}
4506#endif
4507
4508/***                         Cache management                              ***/
4509
4510/* dcbf */
4511static void gen_dcbf(DisasContext *ctx)
4512{
4513    /* XXX: specification says this is treated as a load by the MMU */
4514    TCGv t0;
4515    gen_set_access_type(ctx, ACCESS_CACHE);
4516    t0 = tcg_temp_new();
4517    gen_addr_reg_index(ctx, t0);
4518    gen_qemu_ld8u(ctx, t0, t0);
4519    tcg_temp_free(t0);
4520}
4521
4522/* dcbfep (external PID dcbf) */
4523static void gen_dcbfep(DisasContext *ctx)
4524{
4525    /* XXX: specification says this is treated as a load by the MMU */
4526    TCGv t0;
4527    CHK_SV;
4528    gen_set_access_type(ctx, ACCESS_CACHE);
4529    t0 = tcg_temp_new();
4530    gen_addr_reg_index(ctx, t0);
4531    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
4532    tcg_temp_free(t0);
4533}
4534
4535/* dcbi (Supervisor only) */
4536static void gen_dcbi(DisasContext *ctx)
4537{
4538#if defined(CONFIG_USER_ONLY)
4539    GEN_PRIV;
4540#else
4541    TCGv EA, val;
4542
4543    CHK_SV;
4544    EA = tcg_temp_new();
4545    gen_set_access_type(ctx, ACCESS_CACHE);
4546    gen_addr_reg_index(ctx, EA);
4547    val = tcg_temp_new();
4548    /* XXX: specification says this should be treated as a store by the MMU */
4549    gen_qemu_ld8u(ctx, val, EA);
4550    gen_qemu_st8(ctx, val, EA);
4551    tcg_temp_free(val);
4552    tcg_temp_free(EA);
4553#endif /* defined(CONFIG_USER_ONLY) */
4554}
4555
4556/* dcdst */
4557static void gen_dcbst(DisasContext *ctx)
4558{
4559    /* XXX: specification say this is treated as a load by the MMU */
4560    TCGv t0;
4561    gen_set_access_type(ctx, ACCESS_CACHE);
4562    t0 = tcg_temp_new();
4563    gen_addr_reg_index(ctx, t0);
4564    gen_qemu_ld8u(ctx, t0, t0);
4565    tcg_temp_free(t0);
4566}
4567
4568/* dcbstep (dcbstep External PID version) */
4569static void gen_dcbstep(DisasContext *ctx)
4570{
4571    /* XXX: specification say this is treated as a load by the MMU */
4572    TCGv t0;
4573    gen_set_access_type(ctx, ACCESS_CACHE);
4574    t0 = tcg_temp_new();
4575    gen_addr_reg_index(ctx, t0);
4576    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
4577    tcg_temp_free(t0);
4578}
4579
/* dcbt: data cache block touch (prefetch hint) */
static void gen_dcbt(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
4589
/* dcbtep: external PID variant of dcbt */
static void gen_dcbtep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
4599
/* dcbtst: data cache block touch for store (prefetch hint) */
static void gen_dcbtst(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
4609
/* dcbtstep: external PID variant of dcbtst */
static void gen_dcbtstep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
4619
4620/* dcbtls */
4621static void gen_dcbtls(DisasContext *ctx)
4622{
4623    /* Always fails locking the cache */
4624    TCGv t0 = tcg_temp_new();
4625    gen_load_spr(t0, SPR_Exxx_L1CSR0);
4626    tcg_gen_ori_tl(t0, t0, L1CSR0_CUL);
4627    gen_store_spr(SPR_Exxx_L1CSR0, t0);
4628    tcg_temp_free(t0);
4629}
4630
4631/* dcbz */
4632static void gen_dcbz(DisasContext *ctx)
4633{
4634    TCGv tcgv_addr;
4635    TCGv_i32 tcgv_op;
4636
4637    gen_set_access_type(ctx, ACCESS_CACHE);
4638    tcgv_addr = tcg_temp_new();
4639    tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
4640    gen_addr_reg_index(ctx, tcgv_addr);
4641    gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op);
4642    tcg_temp_free(tcgv_addr);
4643    tcg_temp_free_i32(tcgv_op);
4644}
4645
4646/* dcbzep */
4647static void gen_dcbzep(DisasContext *ctx)
4648{
4649    TCGv tcgv_addr;
4650    TCGv_i32 tcgv_op;
4651
4652    gen_set_access_type(ctx, ACCESS_CACHE);
4653    tcgv_addr = tcg_temp_new();
4654    tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
4655    gen_addr_reg_index(ctx, tcgv_addr);
4656    gen_helper_dcbzep(cpu_env, tcgv_addr, tcgv_op);
4657    tcg_temp_free(tcgv_addr);
4658    tcg_temp_free_i32(tcgv_op);
4659}
4660
4661/* dst / dstt */
4662static void gen_dst(DisasContext *ctx)
4663{
4664    if (rA(ctx->opcode) == 0) {
4665        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
4666    } else {
4667        /* interpreted as no-op */
4668    }
4669}
4670
4671/* dstst /dststt */
4672static void gen_dstst(DisasContext *ctx)
4673{
4674    if (rA(ctx->opcode) == 0) {
4675        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
4676    } else {
4677        /* interpreted as no-op */
4678    }
4679
4680}
4681
/* dss / dssall: stop data stream(s) started by dst/dstst */
static void gen_dss(DisasContext *ctx)
{
    /* interpreted as no-op */
}
4687
4688/* icbi */
4689static void gen_icbi(DisasContext *ctx)
4690{
4691    TCGv t0;
4692    gen_set_access_type(ctx, ACCESS_CACHE);
4693    t0 = tcg_temp_new();
4694    gen_addr_reg_index(ctx, t0);
4695    gen_helper_icbi(cpu_env, t0);
4696    tcg_temp_free(t0);
4697}
4698
4699/* icbiep */
4700static void gen_icbiep(DisasContext *ctx)
4701{
4702    TCGv t0;
4703    gen_set_access_type(ctx, ACCESS_CACHE);
4704    t0 = tcg_temp_new();
4705    gen_addr_reg_index(ctx, t0);
4706    gen_helper_icbiep(cpu_env, t0);
4707    tcg_temp_free(t0);
4708}
4709
4710/* Optional: */
/* dcba: data cache block allocate */
static void gen_dcba(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a store by the MMU
     *      but does not generate any exception
     */
}
4720
4721/***                    Segment register manipulation                      ***/
4722/* Supervisor only: */
4723
4724/* mfsr */
4725static void gen_mfsr(DisasContext *ctx)
4726{
4727#if defined(CONFIG_USER_ONLY)
4728    GEN_PRIV;
4729#else
4730    TCGv t0;
4731
4732    CHK_SV;
4733    t0 = tcg_const_tl(SR(ctx->opcode));
4734    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
4735    tcg_temp_free(t0);
4736#endif /* defined(CONFIG_USER_ONLY) */
4737}
4738
4739/* mfsrin */
4740static void gen_mfsrin(DisasContext *ctx)
4741{
4742#if defined(CONFIG_USER_ONLY)
4743    GEN_PRIV;
4744#else
4745    TCGv t0;
4746
4747    CHK_SV;
4748    t0 = tcg_temp_new();
4749    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
4750    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
4751    tcg_temp_free(t0);
4752#endif /* defined(CONFIG_USER_ONLY) */
4753}
4754
4755/* mtsr */
4756static void gen_mtsr(DisasContext *ctx)
4757{
4758#if defined(CONFIG_USER_ONLY)
4759    GEN_PRIV;
4760#else
4761    TCGv t0;
4762
4763    CHK_SV;
4764    t0 = tcg_const_tl(SR(ctx->opcode));
4765    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
4766    tcg_temp_free(t0);
4767#endif /* defined(CONFIG_USER_ONLY) */
4768}
4769
4770/* mtsrin */
4771static void gen_mtsrin(DisasContext *ctx)
4772{
4773#if defined(CONFIG_USER_ONLY)
4774    GEN_PRIV;
4775#else
4776    TCGv t0;
4777    CHK_SV;
4778
4779    t0 = tcg_temp_new();
4780    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
4781    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rD(ctx->opcode)]);
4782    tcg_temp_free(t0);
4783#endif /* defined(CONFIG_USER_ONLY) */
4784}
4785
4786#if defined(TARGET_PPC64)
4787/* Specific implementation for PowerPC 64 "bridge" emulation using SLB */
4788
4789/* mfsr */
4790static void gen_mfsr_64b(DisasContext *ctx)
4791{
4792#if defined(CONFIG_USER_ONLY)
4793    GEN_PRIV;
4794#else
4795    TCGv t0;
4796
4797    CHK_SV;
4798    t0 = tcg_const_tl(SR(ctx->opcode));
4799    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
4800    tcg_temp_free(t0);
4801#endif /* defined(CONFIG_USER_ONLY) */
4802}
4803
4804/* mfsrin */
4805static void gen_mfsrin_64b(DisasContext *ctx)
4806{
4807#if defined(CONFIG_USER_ONLY)
4808    GEN_PRIV;
4809#else
4810    TCGv t0;
4811
4812    CHK_SV;
4813    t0 = tcg_temp_new();
4814    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
4815    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
4816    tcg_temp_free(t0);
4817#endif /* defined(CONFIG_USER_ONLY) */
4818}
4819
4820/* mtsr */
4821static void gen_mtsr_64b(DisasContext *ctx)
4822{
4823#if defined(CONFIG_USER_ONLY)
4824    GEN_PRIV;
4825#else
4826    TCGv t0;
4827
4828    CHK_SV;
4829    t0 = tcg_const_tl(SR(ctx->opcode));
4830    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
4831    tcg_temp_free(t0);
4832#endif /* defined(CONFIG_USER_ONLY) */
4833}
4834
4835/* mtsrin */
4836static void gen_mtsrin_64b(DisasContext *ctx)
4837{
4838#if defined(CONFIG_USER_ONLY)
4839    GEN_PRIV;
4840#else
4841    TCGv t0;
4842
4843    CHK_SV;
4844    t0 = tcg_temp_new();
4845    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
4846    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
4847    tcg_temp_free(t0);
4848#endif /* defined(CONFIG_USER_ONLY) */
4849}
4850
4851/* slbmte */
4852static void gen_slbmte(DisasContext *ctx)
4853{
4854#if defined(CONFIG_USER_ONLY)
4855    GEN_PRIV;
4856#else
4857    CHK_SV;
4858
4859    gen_helper_store_slb(cpu_env, cpu_gpr[rB(ctx->opcode)],
4860                         cpu_gpr[rS(ctx->opcode)]);
4861#endif /* defined(CONFIG_USER_ONLY) */
4862}
4863
4864static void gen_slbmfee(DisasContext *ctx)
4865{
4866#if defined(CONFIG_USER_ONLY)
4867    GEN_PRIV;
4868#else
4869    CHK_SV;
4870
4871    gen_helper_load_slb_esid(cpu_gpr[rS(ctx->opcode)], cpu_env,
4872                             cpu_gpr[rB(ctx->opcode)]);
4873#endif /* defined(CONFIG_USER_ONLY) */
4874}
4875
4876static void gen_slbmfev(DisasContext *ctx)
4877{
4878#if defined(CONFIG_USER_ONLY)
4879    GEN_PRIV;
4880#else
4881    CHK_SV;
4882
4883    gen_helper_load_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env,
4884                             cpu_gpr[rB(ctx->opcode)]);
4885#endif /* defined(CONFIG_USER_ONLY) */
4886}
4887
/* slbfee.: look up the SLB entry matching the ESID in rB */
static void gen_slbfee_(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
    TCGLabel *l1, *l2;

    if (unlikely(ctx->pr)) {
        gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
        return;
    }
    /* The helper returns the VSID, or -1 if no matching entry exists */
    gen_helper_find_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env,
                             cpu_gpr[rB(ctx->opcode)]);
    l1 = gen_new_label();
    l2 = gen_new_label();
    /* CR0 starts as just the SO bit */
    tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rS(ctx->opcode)], -1, l1);
    /* Entry found: set CR0.EQ and keep the VSID in rS */
    tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ);
    tcg_gen_br(l2);
    gen_set_label(l1);
    /* No match: CR0.EQ stays clear and rS is zeroed */
    tcg_gen_movi_tl(cpu_gpr[rS(ctx->opcode)], 0);
    gen_set_label(l2);
#endif
}
4912#endif /* defined(TARGET_PPC64) */
4913
4914/***                      Lookaside buffer management                      ***/
4915/* Optional & supervisor only: */
4916
4917/* tlbia */
4918static void gen_tlbia(DisasContext *ctx)
4919{
4920#if defined(CONFIG_USER_ONLY)
4921    GEN_PRIV;
4922#else
4923    CHK_HV;
4924
4925    gen_helper_tlbia(cpu_env);
4926#endif  /* defined(CONFIG_USER_ONLY) */
4927}
4928
4929/* tlbiel */
4930static void gen_tlbiel(DisasContext *ctx)
4931{
4932#if defined(CONFIG_USER_ONLY)
4933    GEN_PRIV;
4934#else
4935    CHK_SV;
4936
4937    gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
4938#endif /* defined(CONFIG_USER_ONLY) */
4939}
4940
/* tlbie */
static void gen_tlbie(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV;
#else
    TCGv_i32 t1;

    if (ctx->gtse) {
        CHK_SV; /* If gtse is set then tlbie is supervisor privileged */
    } else {
        CHK_HV; /* Else hypervisor privileged */
    }

    /* In narrow (32-bit) mode only the low 32 bits of rB are an address */
    if (NARROW_MODE(ctx)) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_ext32u_tl(t0, cpu_gpr[rB(ctx->opcode)]);
        gen_helper_tlbie(cpu_env, t0);
        tcg_temp_free(t0);
    } else {
        gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
    }
    /*
     * tlbie is global: record that a broadcast flush is pending so the
     * next synchronisation point flushes all CPUs, not just this one.
     */
    t1 = tcg_temp_new_i32();
    tcg_gen_ld_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
    tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH);
    tcg_gen_st_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
    tcg_temp_free_i32(t1);
#endif /* defined(CONFIG_USER_ONLY) */
}
4970
/* tlbsync */
static void gen_tlbsync(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV;
#else

    if (ctx->gtse) {
        CHK_SV; /* If gtse is set then tlbsync is supervisor privileged */
    } else {
        CHK_HV; /* Else hypervisor privileged */
    }

    /*
     * On BookS the pending flush is performed by ptesync, which makes
     * tlbsync a nop for server CPUs; BookE performs the flush here.
     */
    if (ctx->insns_flags & PPC_BOOKE) {
        gen_check_tlb_flush(ctx, true);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
4990
4991#if defined(TARGET_PPC64)
4992/* slbia */
4993static void gen_slbia(DisasContext *ctx)
4994{
4995#if defined(CONFIG_USER_ONLY)
4996    GEN_PRIV;
4997#else
4998    CHK_SV;
4999
5000    gen_helper_slbia(cpu_env);
5001#endif /* defined(CONFIG_USER_ONLY) */
5002}
5003
5004/* slbie */
5005static void gen_slbie(DisasContext *ctx)
5006{
5007#if defined(CONFIG_USER_ONLY)
5008    GEN_PRIV;
5009#else
5010    CHK_SV;
5011
5012    gen_helper_slbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
5013#endif /* defined(CONFIG_USER_ONLY) */
5014}
5015
5016/* slbieg */
5017static void gen_slbieg(DisasContext *ctx)
5018{
5019#if defined(CONFIG_USER_ONLY)
5020    GEN_PRIV;
5021#else
5022    CHK_SV;
5023
5024    gen_helper_slbieg(cpu_env, cpu_gpr[rB(ctx->opcode)]);
5025#endif /* defined(CONFIG_USER_ONLY) */
5026}
5027
5028/* slbsync */
5029static void gen_slbsync(DisasContext *ctx)
5030{
5031#if defined(CONFIG_USER_ONLY)
5032    GEN_PRIV;
5033#else
5034    CHK_SV;
5035    gen_check_tlb_flush(ctx, true);
5036#endif /* defined(CONFIG_USER_ONLY) */
5037}
5038
5039#endif  /* defined(TARGET_PPC64) */
5040
5041/***                              External control                         ***/
5042/* Optional: */
5043
5044/* eciwx */
5045static void gen_eciwx(DisasContext *ctx)
5046{
5047    TCGv t0;
5048    /* Should check EAR[E] ! */
5049    gen_set_access_type(ctx, ACCESS_EXT);
5050    t0 = tcg_temp_new();
5051    gen_addr_reg_index(ctx, t0);
5052    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
5053                       DEF_MEMOP(MO_UL | MO_ALIGN));
5054    tcg_temp_free(t0);
5055}
5056
5057/* ecowx */
5058static void gen_ecowx(DisasContext *ctx)
5059{
5060    TCGv t0;
5061    /* Should check EAR[E] ! */
5062    gen_set_access_type(ctx, ACCESS_EXT);
5063    t0 = tcg_temp_new();
5064    gen_addr_reg_index(ctx, t0);
5065    tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
5066                       DEF_MEMOP(MO_UL | MO_ALIGN));
5067    tcg_temp_free(t0);
5068}
5069
5070/* PowerPC 601 specific instructions */
5071
5072/* abs - abs. */
5073static void gen_abs(DisasContext *ctx)
5074{
5075    TCGv d = cpu_gpr[rD(ctx->opcode)];
5076    TCGv a = cpu_gpr[rA(ctx->opcode)];
5077
5078    tcg_gen_abs_tl(d, a);
5079    if (unlikely(Rc(ctx->opcode) != 0)) {
5080        gen_set_Rc0(ctx, d);
5081    }
5082}
5083
/* abso - abso. */
static void gen_abso(DisasContext *ctx)
{
    TCGv d = cpu_gpr[rD(ctx->opcode)];
    TCGv a = cpu_gpr[rA(ctx->opcode)];

    /*
     * OV is set only for the single overflowing input 0x80000000.
     * It must be computed before the abs, since d and a may be the
     * same register.
     */
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_ov, a, 0x80000000);
    tcg_gen_abs_tl(d, a);
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, d);
    }
}
5097
5098/* clcs */
5099static void gen_clcs(DisasContext *ctx)
5100{
5101    TCGv_i32 t0 = tcg_const_i32(rA(ctx->opcode));
5102    gen_helper_clcs(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
5103    tcg_temp_free_i32(t0);
5104    /* Rc=1 sets CR0 to an undefined state */
5105}
5106
5107/* div - div. */
5108static void gen_div(DisasContext *ctx)
5109{
5110    gen_helper_div(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)],
5111                   cpu_gpr[rB(ctx->opcode)]);
5112    if (unlikely(Rc(ctx->opcode) != 0)) {
5113        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
5114    }
5115}
5116
5117/* divo - divo. */
5118static void gen_divo(DisasContext *ctx)
5119{
5120    gen_helper_divo(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)],
5121                    cpu_gpr[rB(ctx->opcode)]);
5122    if (unlikely(Rc(ctx->opcode) != 0)) {
5123        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
5124    }
5125}
5126
5127/* divs - divs. */
5128static void gen_divs(DisasContext *ctx)
5129{
5130    gen_helper_divs(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)],
5131                    cpu_gpr[rB(ctx->opcode)]);
5132    if (unlikely(Rc(ctx->opcode) != 0)) {
5133        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
5134    }
5135}
5136
5137/* divso - divso. */
5138static void gen_divso(DisasContext *ctx)
5139{
5140    gen_helper_divso(cpu_gpr[rD(ctx->opcode)], cpu_env,
5141                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
5142    if (unlikely(Rc(ctx->opcode) != 0)) {
5143        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);