/* qemu/tcg/tcg.h */
   1/*
   2 * Tiny Code Generator for QEMU
   3 *
   4 * Copyright (c) 2008 Fabrice Bellard
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a copy
   7 * of this software and associated documentation files (the "Software"), to deal
   8 * in the Software without restriction, including without limitation the rights
   9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10 * copies of the Software, and to permit persons to whom the Software is
  11 * furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22 * THE SOFTWARE.
  23 */
  24
  25#ifndef TCG_H
  26#define TCG_H
  27
  28#include "qemu-common.h"
  29#include "cpu.h"
  30#include "exec/tb-context.h"
  31#include "qemu/bitops.h"
  32#include "tcg-target.h"
  33
  34/* XXX: make safe guess about sizes */
  35#define MAX_OP_PER_INSTR 266
  36
  37#if HOST_LONG_BITS == 32
  38#define MAX_OPC_PARAM_PER_ARG 2
  39#else
  40#define MAX_OPC_PARAM_PER_ARG 1
  41#endif
  42#define MAX_OPC_PARAM_IARGS 5
  43#define MAX_OPC_PARAM_OARGS 1
  44#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
  45
  46/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
  47 * and up to 4 + N parameters on 64-bit archs
  48 * (N = number of input arguments + output arguments).  */
  49#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
  50#define OPC_BUF_SIZE 640
  51#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
  52
  53#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
  54
  55#define CPU_TEMP_BUF_NLONGS 128
  56
  57/* Default target word size to pointer size.  */
  58#ifndef TCG_TARGET_REG_BITS
  59# if UINTPTR_MAX == UINT32_MAX
  60#  define TCG_TARGET_REG_BITS 32
  61# elif UINTPTR_MAX == UINT64_MAX
  62#  define TCG_TARGET_REG_BITS 64
  63# else
  64#  error Unknown pointer size for tcg target
  65# endif
  66#endif
  67
  68#if TCG_TARGET_REG_BITS == 32
  69typedef int32_t tcg_target_long;
  70typedef uint32_t tcg_target_ulong;
  71#define TCG_PRIlx PRIx32
  72#define TCG_PRIld PRId32
  73#elif TCG_TARGET_REG_BITS == 64
  74typedef int64_t tcg_target_long;
  75typedef uint64_t tcg_target_ulong;
  76#define TCG_PRIlx PRIx64
  77#define TCG_PRIld PRId64
  78#else
  79#error unsupported
  80#endif
  81
  82#if TCG_TARGET_NB_REGS <= 32
  83typedef uint32_t TCGRegSet;
  84#elif TCG_TARGET_NB_REGS <= 64
  85typedef uint64_t TCGRegSet;
  86#else
  87#error unsupported
  88#endif
  89
  90#if TCG_TARGET_REG_BITS == 32
  91/* Turn some undef macros into false macros.  */
  92#define TCG_TARGET_HAS_extrl_i64_i32    0
  93#define TCG_TARGET_HAS_extrh_i64_i32    0
  94#define TCG_TARGET_HAS_div_i64          0
  95#define TCG_TARGET_HAS_rem_i64          0
  96#define TCG_TARGET_HAS_div2_i64         0
  97#define TCG_TARGET_HAS_rot_i64          0
  98#define TCG_TARGET_HAS_ext8s_i64        0
  99#define TCG_TARGET_HAS_ext16s_i64       0
 100#define TCG_TARGET_HAS_ext32s_i64       0
 101#define TCG_TARGET_HAS_ext8u_i64        0
 102#define TCG_TARGET_HAS_ext16u_i64       0
 103#define TCG_TARGET_HAS_ext32u_i64       0
 104#define TCG_TARGET_HAS_bswap16_i64      0
 105#define TCG_TARGET_HAS_bswap32_i64      0
 106#define TCG_TARGET_HAS_bswap64_i64      0
 107#define TCG_TARGET_HAS_neg_i64          0
 108#define TCG_TARGET_HAS_not_i64          0
 109#define TCG_TARGET_HAS_andc_i64         0
 110#define TCG_TARGET_HAS_orc_i64          0
 111#define TCG_TARGET_HAS_eqv_i64          0
 112#define TCG_TARGET_HAS_nand_i64         0
 113#define TCG_TARGET_HAS_nor_i64          0
 114#define TCG_TARGET_HAS_deposit_i64      0
 115#define TCG_TARGET_HAS_movcond_i64      0
 116#define TCG_TARGET_HAS_add2_i64         0
 117#define TCG_TARGET_HAS_sub2_i64         0
 118#define TCG_TARGET_HAS_mulu2_i64        0
 119#define TCG_TARGET_HAS_muls2_i64        0
 120#define TCG_TARGET_HAS_muluh_i64        0
 121#define TCG_TARGET_HAS_mulsh_i64        0
 122/* Turn some undef macros into true macros.  */
 123#define TCG_TARGET_HAS_add2_i32         1
 124#define TCG_TARGET_HAS_sub2_i32         1
 125#endif
 126
 127#ifndef TCG_TARGET_deposit_i32_valid
 128#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
 129#endif
 130#ifndef TCG_TARGET_deposit_i64_valid
 131#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
 132#endif
 133
 134/* Only one of DIV or DIV2 should be defined.  */
 135#if defined(TCG_TARGET_HAS_div_i32)
 136#define TCG_TARGET_HAS_div2_i32         0
 137#elif defined(TCG_TARGET_HAS_div2_i32)
 138#define TCG_TARGET_HAS_div_i32          0
 139#define TCG_TARGET_HAS_rem_i32          0
 140#endif
 141#if defined(TCG_TARGET_HAS_div_i64)
 142#define TCG_TARGET_HAS_div2_i64         0
 143#elif defined(TCG_TARGET_HAS_div2_i64)
 144#define TCG_TARGET_HAS_div_i64          0
 145#define TCG_TARGET_HAS_rem_i64          0
 146#endif
 147
 148/* For 32-bit targets, some sort of unsigned widening multiply is required.  */
 149#if TCG_TARGET_REG_BITS == 32 \
 150    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
 151         || defined(TCG_TARGET_HAS_muluh_i32))
 152# error "Missing unsigned widening multiply"
 153#endif
 154
 155#ifndef TARGET_INSN_START_EXTRA_WORDS
 156# define TARGET_INSN_START_WORDS 1
 157#else
 158# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
 159#endif
 160
 161typedef enum TCGOpcode {
 162#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
 163#include "tcg-opc.h"
 164#undef DEF
 165    NB_OPS,
 166} TCGOpcode;
 167
 168#define tcg_regset_clear(d) (d) = 0
 169#define tcg_regset_set(d, s) (d) = (s)
 170#define tcg_regset_set32(d, reg, val32) (d) |= (val32) << (reg)
 171#define tcg_regset_set_reg(d, r) (d) |= 1L << (r)
 172#define tcg_regset_reset_reg(d, r) (d) &= ~(1L << (r))
 173#define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1)
 174#define tcg_regset_or(d, a, b) (d) = (a) | (b)
 175#define tcg_regset_and(d, a, b) (d) = (a) & (b)
 176#define tcg_regset_andnot(d, a, b) (d) = (a) & ~(b)
 177#define tcg_regset_not(d, a) (d) = ~(a)
 178
 179#ifndef TCG_TARGET_INSN_UNIT_SIZE
 180# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
 181#elif TCG_TARGET_INSN_UNIT_SIZE == 1
 182typedef uint8_t tcg_insn_unit;
 183#elif TCG_TARGET_INSN_UNIT_SIZE == 2
 184typedef uint16_t tcg_insn_unit;
 185#elif TCG_TARGET_INSN_UNIT_SIZE == 4
 186typedef uint32_t tcg_insn_unit;
 187#elif TCG_TARGET_INSN_UNIT_SIZE == 8
 188typedef uint64_t tcg_insn_unit;
 189#else
 190/* The port better have done this.  */
 191#endif
 192
 193
 194#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
 195# define tcg_debug_assert(X) do { assert(X); } while (0)
 196#elif QEMU_GNUC_PREREQ(4, 5)
 197# define tcg_debug_assert(X) \
 198    do { if (!(X)) { __builtin_unreachable(); } } while (0)
 199#else
 200# define tcg_debug_assert(X) do { (void)(X); } while (0)
 201#endif
 202
/* A relocation recorded against a label whose address is not yet known;
   relocations for one label form a chain (see TCGLabel.u.first_reloc)
   so each site can be patched once the label is resolved.  */
typedef struct TCGRelocation {
    struct TCGRelocation *next;   /* next relocation on the same chain */
    int type;                     /* relocation kind (backend-specific) */
    tcg_insn_unit *ptr;           /* code location to patch */
    intptr_t addend;              /* constant folded in when resolving */
} TCGRelocation;

/* A branch target within the generated code stream.  */
typedef struct TCGLabel {
    unsigned has_value : 1;       /* nonzero once u.value is valid */
    unsigned id : 31;             /* label number -- NOTE(review): presumably
                                     assigned by gen_new_label(); confirm */
    union {
        uintptr_t value;          /* resolved address (has_value == 1) */
        tcg_insn_unit *value_ptr; /* the same address as a code pointer */
        TCGRelocation *first_reloc; /* pending relocations otherwise */
    } u;
} TCGLabel;
 219
 220typedef struct TCGPool {
 221    struct TCGPool *next;
 222    int size;
 223    uint8_t data[0] __attribute__ ((aligned));
 224} TCGPool;
 225
 226#define TCG_POOL_CHUNK_SIZE 32768
 227
 228#define TCG_MAX_TEMPS 512
 229#define TCG_MAX_INSNS 512
 230
 231/* when the size of the arguments of a called function is smaller than
 232   this value, they are statically allocated in the TB stack frame */
 233#define TCG_STATIC_CALL_ARGS_SIZE 128
 234
 235typedef enum TCGType {
 236    TCG_TYPE_I32,
 237    TCG_TYPE_I64,
 238    TCG_TYPE_COUNT, /* number of different types */
 239
 240    /* An alias for the size of the host register.  */
 241#if TCG_TARGET_REG_BITS == 32
 242    TCG_TYPE_REG = TCG_TYPE_I32,
 243#else
 244    TCG_TYPE_REG = TCG_TYPE_I64,
 245#endif
 246
 247    /* An alias for the size of the native pointer.  */
 248#if UINTPTR_MAX == UINT32_MAX
 249    TCG_TYPE_PTR = TCG_TYPE_I32,
 250#else
 251    TCG_TYPE_PTR = TCG_TYPE_I64,
 252#endif
 253
 254    /* An alias for the size of the target "long", aka register.  */
 255#if TARGET_LONG_BITS == 64
 256    TCG_TYPE_TL = TCG_TYPE_I64,
 257#else
 258    TCG_TYPE_TL = TCG_TYPE_I32,
 259#endif
 260} TCGType;
 261
 262/* Constants for qemu_ld and qemu_st for the Memory Operation field.  */
 263typedef enum TCGMemOp {
 264    MO_8     = 0,
 265    MO_16    = 1,
 266    MO_32    = 2,
 267    MO_64    = 3,
 268    MO_SIZE  = 3,   /* Mask for the above.  */
 269
 270    MO_SIGN  = 4,   /* Sign-extended, otherwise zero-extended.  */
 271
 272    MO_BSWAP = 8,   /* Host reverse endian.  */
 273#ifdef HOST_WORDS_BIGENDIAN
 274    MO_LE    = MO_BSWAP,
 275    MO_BE    = 0,
 276#else
 277    MO_LE    = 0,
 278    MO_BE    = MO_BSWAP,
 279#endif
 280#ifdef TARGET_WORDS_BIGENDIAN
 281    MO_TE    = MO_BE,
 282#else
 283    MO_TE    = MO_LE,
 284#endif
 285
 286    /* MO_UNALN accesses are never checked for alignment.
 287     * MO_ALIGN accesses will result in a call to the CPU's
 288     * do_unaligned_access hook if the guest address is not aligned.
 289     * The default depends on whether the target CPU defines ALIGNED_ONLY.
 290     * Some architectures (e.g. ARMv8) need the address which is aligned
 291     * to a size more than the size of the memory access.
 292     * To support such check it's enough the current costless alignment
 293     * check implementation in QEMU, but we need to support
 294     * an alignment size specifying.
 295     * MO_ALIGN supposes a natural alignment
 296     * (i.e. the alignment size is the size of a memory access).
 297     * Note that an alignment size must be equal or greater
 298     * than an access size.
 299     * There are three options:
 300     * - an alignment to the size of an access (MO_ALIGN);
 301     * - an alignment to the specified size that is equal or greater than
 302     *   an access size (MO_ALIGN_x where 'x' is a size in bytes);
 303     * - unaligned access permitted (MO_UNALN).
 304     */
 305    MO_ASHIFT = 4,
 306    MO_AMASK = 7 << MO_ASHIFT,
 307#ifdef ALIGNED_ONLY
 308    MO_ALIGN = 0,
 309    MO_UNALN = MO_AMASK,
 310#else
 311    MO_ALIGN = MO_AMASK,
 312    MO_UNALN = 0,
 313#endif
 314    MO_ALIGN_2  = 1 << MO_ASHIFT,
 315    MO_ALIGN_4  = 2 << MO_ASHIFT,
 316    MO_ALIGN_8  = 3 << MO_ASHIFT,
 317    MO_ALIGN_16 = 4 << MO_ASHIFT,
 318    MO_ALIGN_32 = 5 << MO_ASHIFT,
 319    MO_ALIGN_64 = 6 << MO_ASHIFT,
 320
 321    /* Combinations of the above, for ease of use.  */
 322    MO_UB    = MO_8,
 323    MO_UW    = MO_16,
 324    MO_UL    = MO_32,
 325    MO_SB    = MO_SIGN | MO_8,
 326    MO_SW    = MO_SIGN | MO_16,
 327    MO_SL    = MO_SIGN | MO_32,
 328    MO_Q     = MO_64,
 329
 330    MO_LEUW  = MO_LE | MO_UW,
 331    MO_LEUL  = MO_LE | MO_UL,
 332    MO_LESW  = MO_LE | MO_SW,
 333    MO_LESL  = MO_LE | MO_SL,
 334    MO_LEQ   = MO_LE | MO_Q,
 335
 336    MO_BEUW  = MO_BE | MO_UW,
 337    MO_BEUL  = MO_BE | MO_UL,
 338    MO_BESW  = MO_BE | MO_SW,
 339    MO_BESL  = MO_BE | MO_SL,
 340    MO_BEQ   = MO_BE | MO_Q,
 341
 342    MO_TEUW  = MO_TE | MO_UW,
 343    MO_TEUL  = MO_TE | MO_UL,
 344    MO_TESW  = MO_TE | MO_SW,
 345    MO_TESL  = MO_TE | MO_SL,
 346    MO_TEQ   = MO_TE | MO_Q,
 347
 348    MO_SSIZE = MO_SIZE | MO_SIGN,
 349} TCGMemOp;
 350
 351/**
 352 * get_alignment_bits
 353 * @memop: TCGMemOp value
 354 *
 355 * Extract the alignment size from the memop.
 356 *
 357 * Returns: 0 in case of byte access (which is always aligned);
 358 *          positive value - number of alignment bits;
 359 *          negative value if unaligned access enabled
 360 *          and this is not a byte access.
 361 */
 362static inline int get_alignment_bits(TCGMemOp memop)
 363{
 364    int a = memop & MO_AMASK;
 365    int s = memop & MO_SIZE;
 366    int r;
 367
 368    if (a == MO_UNALN) {
 369        /* Negative value if unaligned access enabled,
 370         * or zero value in case of byte access.
 371         */
 372        return -s;
 373    } else if (a == MO_ALIGN) {
 374        /* A natural alignment: return a number of access size bits */
 375        r = s;
 376    } else {
 377        /* Specific alignment size. It must be equal or greater
 378         * than the access size.
 379         */
 380        r = a >> MO_ASHIFT;
 381        tcg_debug_assert(r >= s);
 382    }
 383#if defined(CONFIG_SOFTMMU)
 384    /* The requested alignment cannot overlap the TLB flags.  */
 385    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << r) - 1)) == 0);
 386#endif
 387    return r;
 388}
 389
 390typedef tcg_target_ulong TCGArg;
 391
 392/* Define a type and accessor macros for variables.  Using pointer types
 393   is nice because it gives some level of type safely.  Converting to and
 394   from intptr_t rather than int reduces the number of sign-extension
 395   instructions that get implied on 64-bit hosts.  Users of tcg_gen_* don't
 396   need to know about any of this, and should treat TCGv as an opaque type.
 397   In addition we do typechecking for different types of variables.  TCGv_i32
 398   and TCGv_i64 are 32/64-bit variables respectively.  TCGv and TCGv_ptr
 399   are aliases for target_ulong and host pointer sized values respectively.  */
 400
 401typedef struct TCGv_i32_d *TCGv_i32;
 402typedef struct TCGv_i64_d *TCGv_i64;
 403typedef struct TCGv_ptr_d *TCGv_ptr;
 404typedef TCGv_ptr TCGv_env;
 405#if TARGET_LONG_BITS == 32
 406#define TCGv TCGv_i32
 407#elif TARGET_LONG_BITS == 64
 408#define TCGv TCGv_i64
 409#else
 410#error Unhandled TARGET_LONG_BITS value
 411#endif
 412
/* A TCGv_* handle is a temporary's index carried in an opaque pointer
   type (see the comment above); the MAKE_* helpers wrap an index into
   a handle and the GET_* helpers recover it.  */
static inline TCGv_i32 QEMU_ARTIFICIAL MAKE_TCGV_I32(intptr_t i)
{
    return (TCGv_i32)i;
}

static inline TCGv_i64 QEMU_ARTIFICIAL MAKE_TCGV_I64(intptr_t i)
{
    return (TCGv_i64)i;
}

static inline TCGv_ptr QEMU_ARTIFICIAL MAKE_TCGV_PTR(intptr_t i)
{
    return (TCGv_ptr)i;
}

/* Recover the index stored in a handle.  */
static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I32(TCGv_i32 t)
{
    return (intptr_t)t;
}

static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I64(TCGv_i64 t)
{
    return (intptr_t)t;
}

static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_PTR(TCGv_ptr t)
{
    return (intptr_t)t;
}
 442
 443#if TCG_TARGET_REG_BITS == 32
 444#define TCGV_LOW(t) MAKE_TCGV_I32(GET_TCGV_I64(t))
 445#define TCGV_HIGH(t) MAKE_TCGV_I32(GET_TCGV_I64(t) + 1)
 446#endif
 447
 448#define TCGV_EQUAL_I32(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b))
 449#define TCGV_EQUAL_I64(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b))
 450#define TCGV_EQUAL_PTR(a, b) (GET_TCGV_PTR(a) == GET_TCGV_PTR(b))
 451
 452/* Dummy definition to avoid compiler warnings.  */
 453#define TCGV_UNUSED_I32(x) x = MAKE_TCGV_I32(-1)
 454#define TCGV_UNUSED_I64(x) x = MAKE_TCGV_I64(-1)
 455#define TCGV_UNUSED_PTR(x) x = MAKE_TCGV_PTR(-1)
 456
 457#define TCGV_IS_UNUSED_I32(x) (GET_TCGV_I32(x) == -1)
 458#define TCGV_IS_UNUSED_I64(x) (GET_TCGV_I64(x) == -1)
 459#define TCGV_IS_UNUSED_PTR(x) (GET_TCGV_PTR(x) == -1)
 460
 461/* call flags */
 462/* Helper does not read globals (either directly or through an exception). It
 463   implies TCG_CALL_NO_WRITE_GLOBALS. */
 464#define TCG_CALL_NO_READ_GLOBALS    0x0010
 465/* Helper does not write globals */
 466#define TCG_CALL_NO_WRITE_GLOBALS   0x0020
 467/* Helper can be safely suppressed if the return value is not used. */
 468#define TCG_CALL_NO_SIDE_EFFECTS    0x0040
 469
 470/* convenience version of most used call flags */
 471#define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
 472#define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
 473#define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
 474#define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
 475#define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)
 476
 477/* used to align parameters */
 478#define TCG_CALL_DUMMY_TCGV     MAKE_TCGV_I32(-1)
 479#define TCG_CALL_DUMMY_ARG      ((TCGArg)(-1))
 480
 481/* Conditions.  Note that these are laid out for easy manipulation by
 482   the functions below:
 483     bit 0 is used for inverting;
 484     bit 1 is signed,
 485     bit 2 is unsigned,
 486     bit 3 is used with bit 0 for swapping signed/unsigned.  */
 487typedef enum {
 488    /* non-signed */
 489    TCG_COND_NEVER  = 0 | 0 | 0 | 0,
 490    TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
 491    TCG_COND_EQ     = 8 | 0 | 0 | 0,
 492    TCG_COND_NE     = 8 | 0 | 0 | 1,
 493    /* signed */
 494    TCG_COND_LT     = 0 | 0 | 2 | 0,
 495    TCG_COND_GE     = 0 | 0 | 2 | 1,
 496    TCG_COND_LE     = 8 | 0 | 2 | 0,
 497    TCG_COND_GT     = 8 | 0 | 2 | 1,
 498    /* unsigned */
 499    TCG_COND_LTU    = 0 | 4 | 0 | 0,
 500    TCG_COND_GEU    = 0 | 4 | 0 | 1,
 501    TCG_COND_LEU    = 8 | 4 | 0 | 0,
 502    TCG_COND_GTU    = 8 | 4 | 0 | 1,
 503} TCGCond;
 504
/* Invert the sense of the comparison (EQ <-> NE, LT <-> GE, ...).
   Per the bit layout above, flipping bit 0 inverts a condition.  */
static inline TCGCond tcg_invert_cond(TCGCond c)
{
    return (TCGCond)(c ^ 1);
}

/* Swap the operands in a comparison.  EQ/NE (and NEVER/ALWAYS) are
   symmetric, so conditions with bits 1-2 clear pass through unchanged;
   the ordered conditions flip bits 0 and 3 (e.g. LT <-> GT).  */
static inline TCGCond tcg_swap_cond(TCGCond c)
{
    return c & 6 ? (TCGCond)(c ^ 9) : c;
}

/* Create an "unsigned" version of a "signed" comparison: when the
   signed bit (2) is set, exchange it for the unsigned bit (4).  */
static inline TCGCond tcg_unsigned_cond(TCGCond c)
{
    return c & 2 ? (TCGCond)(c ^ 6) : c;
}

/* Must a comparison be considered unsigned?  (Tests the unsigned bit.)  */
static inline bool is_unsigned_cond(TCGCond c)
{
    return (c & 4) != 0;
}
 528
 529/* Create a "high" version of a double-word comparison.
 530   This removes equality from a LTE or GTE comparison.  */
 531static inline TCGCond tcg_high_cond(TCGCond c)
 532{
 533    switch (c) {
 534    case TCG_COND_GE:
 535    case TCG_COND_LE:
 536    case TCG_COND_GEU:
 537    case TCG_COND_LEU:
 538        return (TCGCond)(c ^ 8);
 539    default:
 540        return c;
 541    }
 542}
 543
 544typedef enum TCGTempVal {
 545    TEMP_VAL_DEAD,
 546    TEMP_VAL_REG,
 547    TEMP_VAL_MEM,
 548    TEMP_VAL_CONST,
 549} TCGTempVal;
 550
/* Descriptor for one temporary.  The temps[] array in TCGContext holds
   globals first, then temps (see the comment there).  */
typedef struct TCGTemp {
    TCGReg reg:8;                  /* backing host register -- NOTE(review):
                                      presumably valid only when val_type
                                      is TEMP_VAL_REG; confirm in tcg.c */
    TCGTempVal val_type:8;         /* where the value currently lives */
    TCGType base_type:8;           /* type of the temp as declared */
    TCGType type:8;                /* type of this part -- NOTE(review):
                                      presumably may differ from base_type
                                      for split 64-bit temps on 32-bit
                                      hosts; confirm */
    unsigned int fixed_reg:1;
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
    unsigned int mem_coherent:1;   /* memory copy is up to date */
    unsigned int mem_allocated:1;  /* a memory slot has been assigned */
    unsigned int temp_local:1; /* If true, the temp is saved across
                                  basic blocks. Otherwise, it is not
                                  preserved across basic blocks. */
    unsigned int temp_allocated:1; /* never used for code gen */

    tcg_target_long val;           /* constant value (TEMP_VAL_CONST) */
    struct TCGTemp *mem_base;      /* temp holding the memory base address */
    intptr_t mem_offset;           /* offset from mem_base */
    const char *name;              /* symbolic name (set for globals) */
} TCGTemp;
 571
 572typedef struct TCGContext TCGContext;
 573
 574typedef struct TCGTempSet {
 575    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
 576} TCGTempSet;
 577
 578/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
 579   this imples a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
 580   There are never more than 2 outputs, which means that we can store all
 581   dead + sync data within 16 bits.  */
 582#define DEAD_ARG  4
 583#define SYNC_ARG  1
 584typedef uint16_t TCGLifeData;
 585
 586/* The layout here is designed to avoid crossing of a 32-bit boundary.
 587   If we do so, gcc adds padding, expanding the size to 12.  */
 588typedef struct TCGOp {
 589    TCGOpcode opc   : 8;        /*  8 */
 590
 591    /* Index of the prev/next op, or 0 for the end of the list.  */
 592    unsigned prev   : 10;       /* 18 */
 593    unsigned next   : 10;       /* 28 */
 594
 595    /* The number of out and in parameter for a call.  */
 596    unsigned calli  : 4;        /* 32 */
 597    unsigned callo  : 2;        /* 34 */
 598
 599    /* Index of the arguments for this op, or 0 for zero-operand ops.  */
 600    unsigned args   : 14;       /* 48 */
 601
 602    /* Lifetime data of the operands.  */
 603    unsigned life   : 16;       /* 64 */
 604} TCGOp;
 605
 606/* Make sure operands fit in the bitfields above.  */
 607QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
 608QEMU_BUILD_BUG_ON(OPC_BUF_SIZE > (1 << 10));
 609QEMU_BUILD_BUG_ON(OPPARAM_BUF_SIZE > (1 << 14));
 610
 611/* Make sure that we don't overflow 64 bits without noticing.  */
 612QEMU_BUILD_BUG_ON(sizeof(TCGOp) > 8);
 613
 614struct TCGContext {
 615    uint8_t *pool_cur, *pool_end;
 616    TCGPool *pool_first, *pool_current, *pool_first_large;
 617    int nb_labels;
 618    int nb_globals;
 619    int nb_temps;
 620    int nb_indirects;
 621
 622    /* goto_tb support */
 623    tcg_insn_unit *code_buf;
 624    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
 625    uint16_t *tb_jmp_insn_offset; /* tb->jmp_insn_offset if USE_DIRECT_JUMP */
 626    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_addr if !USE_DIRECT_JUMP */
 627
 628    TCGRegSet reserved_regs;
 629    intptr_t current_frame_offset;
 630    intptr_t frame_start;
 631    intptr_t frame_end;
 632    TCGTemp *frame_temp;
 633
 634    tcg_insn_unit *code_ptr;
 635
 636    GHashTable *helpers;
 637
 638#ifdef CONFIG_PROFILER
 639    /* profiling info */
 640    int64_t tb_count1;
 641    int64_t tb_count;
 642    int64_t op_count; /* total insn count */
 643    int op_count_max; /* max insn per TB */
 644    int64_t temp_count;
 645    int temp_count_max;
 646    int64_t del_op_count;
 647    int64_t code_in_len;
 648    int64_t code_out_len;
 649    int64_t search_out_len;
 650    int64_t interm_time;
 651    int64_t code_time;
 652    int64_t la_time;
 653    int64_t opt_time;
 654    int64_t restore_count;
 655    int64_t restore_time;
 656#endif
 657
 658#ifdef CONFIG_DEBUG_TCG
 659    int temps_in_use;
 660    int goto_tb_issue_mask;
 661#endif
 662
 663    int gen_next_op_idx;
 664    int gen_next_parm_idx;
 665
 666    /* Code generation.  Note that we specifically do not use tcg_insn_unit
 667       here, because there's too much arithmetic throughout that relies
 668       on addition and subtraction working on bytes.  Rely on the GCC
 669       extension that allows arithmetic on void*.  */
 670    int code_gen_max_blocks;
 671    void *code_gen_prologue;
 672    void *code_gen_buffer;
 673    size_t code_gen_buffer_size;
 674    void *code_gen_ptr;
 675
 676    /* Threshold to flush the translated code buffer.  */
 677    void *code_gen_highwater;
 678
 679    TBContext tb_ctx;
 680
 681    /* Track which vCPU triggers events */
 682    CPUState *cpu;                      /* *_trans */
 683    TCGv_env tcg_env;                   /* *_exec  */
 684
 685    /* The TCGBackendData structure is private to tcg-target.inc.c.  */
 686    struct TCGBackendData *be;
 687
 688    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
 689    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
 690
 691    /* Tells which temporary holds a given register.
 692       It does not take into account fixed registers */
 693    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];
 694
 695    TCGOp gen_op_buf[OPC_BUF_SIZE];
 696    TCGArg gen_opparam_buf[OPPARAM_BUF_SIZE];
 697
 698    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
 699    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
 700};
 701
 702extern TCGContext tcg_ctx;
 703
/* Overwrite parameter ARG of the already-emitted op at OP_IDX with V.  */
static inline void tcg_set_insn_param(int op_idx, int arg, TCGArg v)
{
    /* args is the op's base index into gen_opparam_buf[].  */
    int op_argi = tcg_ctx.gen_op_buf[op_idx].args;
    tcg_ctx.gen_opparam_buf[op_argi + arg] = v;
}

/* The number of opcodes emitted so far.  */
static inline int tcg_op_buf_count(void)
{
    return tcg_ctx.gen_next_op_idx;
}

/* Test for whether to terminate the TB for using too many opcodes.
   OPC_MAX_SIZE leaves MAX_OP_PER_INSTR of headroom in the buffer, so
   one more guest instruction can always be translated.  */
static inline bool tcg_op_buf_full(void)
{
    return tcg_op_buf_count() >= OPC_MAX_SIZE;
}
 721
 722/* pool based memory allocation */
 723
 724void *tcg_malloc_internal(TCGContext *s, int size);
 725void tcg_pool_reset(TCGContext *s);
 726void tcg_pool_delete(TCGContext *s);
 727
 728void tb_lock(void);
 729void tb_unlock(void);
 730void tb_lock_reset(void);
 731
 732static inline void *tcg_malloc(int size)
 733{
 734    TCGContext *s = &tcg_ctx;
 735    uint8_t *ptr, *ptr_end;
 736    size = (size + sizeof(long) - 1) & ~(sizeof(long) - 1);
 737    ptr = s->pool_cur;
 738    ptr_end = ptr + size;
 739    if (unlikely(ptr_end > s->pool_end)) {
 740        return tcg_malloc_internal(&tcg_ctx, size);
 741    } else {
 742        s->pool_cur = ptr_end;
 743        return ptr;
 744    }
 745}
 746
 747void tcg_context_init(TCGContext *s);
 748void tcg_prologue_init(TCGContext *s);
 749void tcg_func_start(TCGContext *s);
 750
 751int tcg_gen_code(TCGContext *s, TranslationBlock *tb);
 752
 753void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
 754
 755int tcg_global_mem_new_internal(TCGType, TCGv_ptr, intptr_t, const char *);
 756
 757TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name);
 758TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name);
 759
 760TCGv_i32 tcg_temp_new_internal_i32(int temp_local);
 761TCGv_i64 tcg_temp_new_internal_i64(int temp_local);
 762
 763void tcg_temp_free_i32(TCGv_i32 arg);
 764void tcg_temp_free_i64(TCGv_i64 arg);
 765
/* Create a 32-bit global temp backed by memory at REG + OFFSET.  */
static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    int idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
    return MAKE_TCGV_I32(idx);
}

/* Create a 32-bit temp that is not preserved across basic blocks.  */
static inline TCGv_i32 tcg_temp_new_i32(void)
{
    return tcg_temp_new_internal_i32(0);
}

/* Create a 32-bit temp that is preserved across basic blocks.  */
static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
    return tcg_temp_new_internal_i32(1);
}

/* Create a 64-bit global temp backed by memory at REG + OFFSET.  */
static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    int idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
    return MAKE_TCGV_I64(idx);
}

/* Create a 64-bit temp that is not preserved across basic blocks.  */
static inline TCGv_i64 tcg_temp_new_i64(void)
{
    return tcg_temp_new_internal_i64(0);
}

/* Create a 64-bit temp that is preserved across basic blocks.  */
static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
    return tcg_temp_new_internal_i64(1);
}
 799
 800#if defined(CONFIG_DEBUG_TCG)
 801/* If you call tcg_clear_temp_count() at the start of a section of
 802 * code which is not supposed to leak any TCG temporaries, then
 803 * calling tcg_check_temp_count() at the end of the section will
 804 * return 1 if the section did in fact leak a temporary.
 805 */
 806void tcg_clear_temp_count(void);
 807int tcg_check_temp_count(void);
 808#else
 809#define tcg_clear_temp_count() do { } while (0)
 810#define tcg_check_temp_count() 0
 811#endif
 812
 813void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf);
 814void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf);
 815
 816#define TCG_CT_ALIAS  0x80
 817#define TCG_CT_IALIAS 0x40
 818#define TCG_CT_REG    0x01
 819#define TCG_CT_CONST  0x02 /* any constant of register size */
 820
 821typedef struct TCGArgConstraint {
 822    uint16_t ct;
 823    uint8_t alias_index;
 824    union {
 825        TCGRegSet regs;
 826    } u;
 827} TCGArgConstraint;
 828
 829#define TCG_MAX_OP_ARGS 16
 830
 831/* Bits for TCGOpDef->flags, 8 bits available.  */
 832enum {
 833    /* Instruction defines the end of a basic block.  */
 834    TCG_OPF_BB_END       = 0x01,
 835    /* Instruction clobbers call registers and potentially update globals.  */
 836    TCG_OPF_CALL_CLOBBER = 0x02,
 837    /* Instruction has side effects: it cannot be removed if its outputs
 838       are not used, and might trigger exceptions.  */
 839    TCG_OPF_SIDE_EFFECTS = 0x04,
 840    /* Instruction operands are 64-bits (otherwise 32-bits).  */
 841    TCG_OPF_64BIT        = 0x08,
 842    /* Instruction is optional and not implemented by the host, or insn
 843       is generic and should not be implemened by the host.  */
 844    TCG_OPF_NOT_PRESENT  = 0x10,
 845};
 846
 847typedef struct TCGOpDef {
 848    const char *name;
 849    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
 850    uint8_t flags;
 851    TCGArgConstraint *args_ct;
 852    int *sorted_args;
 853#if defined(CONFIG_DEBUG_TCG)
 854    int used;
 855#endif
 856} TCGOpDef;
 857
 858extern TCGOpDef tcg_op_defs[];
 859extern const size_t tcg_op_defs_max;
 860
 861typedef struct TCGTargetOpDef {
 862    TCGOpcode op;
 863    const char *args_ct_str[TCG_MAX_OP_ARGS];
 864} TCGTargetOpDef;
 865
 866#define tcg_abort() \
 867do {\
 868    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
 869    abort();\
 870} while (0)
 871
 872void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs);
 873
 874#if UINTPTR_MAX == UINT32_MAX
 875#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I32(n))
 876#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I32(GET_TCGV_PTR(n))
 877
 878#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((intptr_t)(V)))
 879#define tcg_global_reg_new_ptr(R, N) \
 880    TCGV_NAT_TO_PTR(tcg_global_reg_new_i32((R), (N)))
 881#define tcg_global_mem_new_ptr(R, O, N) \
 882    TCGV_NAT_TO_PTR(tcg_global_mem_new_i32((R), (O), (N)))
 883#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i32())
 884#define tcg_temp_free_ptr(T) tcg_temp_free_i32(TCGV_PTR_TO_NAT(T))
 885#else
 886#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I64(n))
 887#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I64(GET_TCGV_PTR(n))
 888
 889#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((intptr_t)(V)))
 890#define tcg_global_reg_new_ptr(R, N) \
 891    TCGV_NAT_TO_PTR(tcg_global_reg_new_i64((R), (N)))
 892#define tcg_global_mem_new_ptr(R, O, N) \
 893    TCGV_NAT_TO_PTR(tcg_global_mem_new_i64((R), (O), (N)))
 894#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i64())
 895#define tcg_temp_free_ptr(T) tcg_temp_free_i64(TCGV_PTR_TO_NAT(T))
 896#endif
 897
 898void tcg_gen_callN(TCGContext *s, void *func,
 899                   TCGArg ret, int nargs, TCGArg *args);
 900
 901void tcg_op_remove(TCGContext *s, TCGOp *op);
 902TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
 903TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
 904
 905void tcg_optimize(TCGContext *s);
 906
 907/* only used for debugging purposes */
 908void tcg_dump_ops(TCGContext *s);
 909
 910void dump_ops(const uint16_t *opc_buf, const TCGArg *opparam_buf);
 911TCGv_i32 tcg_const_i32(int32_t val);
 912TCGv_i64 tcg_const_i64(int64_t val);
 913TCGv_i32 tcg_const_local_i32(int32_t val);
 914TCGv_i64 tcg_const_local_i64(int64_t val);
 915
 916TCGLabel *gen_new_label(void);
 917
 918/**
 919 * label_arg
 920 * @l: label
 921 *
 922 * Encode a label for storage in the TCG opcode stream.
 923 */
 924
static inline TCGArg label_arg(TCGLabel *l)
{
    /* A label travels through the opcode stream as its pointer value;
       TCGArg (tcg_target_ulong) is wide enough to hold a pointer.  */
    return (uintptr_t)l;
}

/**
 * arg_label
 * @i: value
 *
 * The opposite of label_arg.  Retrieve a label from the
 * encoding of the TCG opcode stream.
 */

static inline TCGLabel *arg_label(TCGArg i)
{
    /* Round-trip through uintptr_t, matching label_arg.  */
    return (TCGLabel *)(uintptr_t)i;
}
 942
 943/**
 944 * tcg_ptr_byte_diff
 945 * @a, @b: addresses to be differenced
 946 *
 947 * There are many places within the TCG backends where we need a byte
 948 * difference between two pointers.  While this can be accomplished
 949 * with local casting, it's easy to get wrong -- especially if one is
 950 * concerned with the signedness of the result.
 951 *
 952 * This version relies on GCC's void pointer arithmetic to get the
 953 * correct result.
 954 */
 955
 956static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
 957{
 958    return a - b;
 959}
 960
 961/**
 962 * tcg_pcrel_diff
 963 * @s: the tcg context
 964 * @target: address of the target
 965 *
 966 * Produce a pc-relative difference, from the current code_ptr
 967 * to the destination address.
 968 */
 969
 970static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
 971{
 972    return tcg_ptr_byte_diff(target, s->code_ptr);
 973}
 974
 975/**
 976 * tcg_current_code_size
 977 * @s: the tcg context
 978 *
 979 * Compute the current code size within the translation block.
 980 * This is used to fill in qemu's data structures for goto_tb.
 981 */
 982
 983static inline size_t tcg_current_code_size(TCGContext *s)
 984{
 985    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
 986}
 987
 988/* Combine the TCGMemOp and mmu_idx parameters into a single value.  */
 989typedef uint32_t TCGMemOpIdx;
 990
 991/**
 992 * make_memop_idx
 993 * @op: memory operation
 994 * @idx: mmu index
 995 *
 996 * Encode these values into a single parameter.
 997 */
 998static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
 999{
1000    tcg_debug_assert(idx <= 15);
1001    return (op << 4) | idx;
1002}
1003
1004/**
1005 * get_memop
1006 * @oi: combined op/idx parameter
1007 *
1008 * Extract the memory operation from the combined value.
1009 */
1010static inline TCGMemOp get_memop(TCGMemOpIdx oi)
1011{
1012    return oi >> 4;
1013}
1014
1015/**
1016 * get_mmuidx
1017 * @oi: combined op/idx parameter
1018 *
1019 * Extract the mmu index from the combined value.
1020 */
1021static inline unsigned get_mmuidx(TCGMemOpIdx oi)
1022{
1023    return oi & 15;
1024}
1025
1026/**
1027 * tcg_qemu_tb_exec:
1028 * @env: pointer to CPUArchState for the CPU
1029 * @tb_ptr: address of generated code for the TB to execute
1030 *
1031 * Start executing code from a given translation block.
1032 * Where translation blocks have been linked, execution
1033 * may proceed from the given TB into successive ones.
1034 * Control eventually returns only when some action is needed
1035 * from the top-level loop: either control must pass to a TB
1036 * which has not yet been directly linked, or an asynchronous
1037 * event such as an interrupt needs handling.
1038 *
1039 * Return: The return value is the value passed to the corresponding
1040 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
1041 * The value is either zero or a 4-byte aligned pointer to that TB combined
1042 * with additional information in its two least significant bits. The
1043 * additional information is encoded as follows:
1044 *  0, 1: the link between this TB and the next is via the specified
1045 *        TB index (0 or 1). That is, we left the TB via (the equivalent
1046 *        of) "goto_tb <index>". The main loop uses this to determine
1047 *        how to link the TB just executed to the next.
1048 *  2:    we are using instruction counting code generation, and we
1049 *        did not start executing this TB because the instruction counter
1050 *        would hit zero midway through it. In this case the pointer
1051 *        returned is the TB we were about to execute, and the caller must
1052 *        arrange to execute the remaining count of instructions.
1053 *  3:    we stopped because the CPU's exit_request flag was set
1054 *        (usually meaning that there is an interrupt that needs to be
1055 *        handled). The pointer returned is the TB we were about to execute
1056 *        when we noticed the pending exit request.
1057 *
1058 * If the bottom two bits indicate an exit-via-index then the CPU
1059 * state is correctly synchronised and ready for execution of the next
1060 * TB (and in particular the guest PC is the address to execute next).
1061 * Otherwise, we gave up on execution of this TB before it started, and
1062 * the caller must fix up the CPU state by calling the CPU's
1063 * synchronize_from_tb() method with the TB pointer we return (falling
 * back to calling the CPU's set_pc method with tb->pc if no
1065 * synchronize_from_tb() method exists).
1066 *
1067 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
1068 * to this default (which just calls the prologue.code emitted by
1069 * tcg_target_qemu_prologue()).
1070 */
/* Encodings for the low two bits of the value returned by
 * tcg_qemu_tb_exec(), as described in the comment above.  */
#define TB_EXIT_MASK 3
#define TB_EXIT_IDX0 0              /* exited via goto_tb slot 0 */
#define TB_EXIT_IDX1 1              /* exited via goto_tb slot 1 */
#define TB_EXIT_ICOUNT_EXPIRED 2    /* icount would hit zero mid-TB */
#define TB_EXIT_REQUESTED 3         /* CPU exit_request was set */

#ifdef HAVE_TCG_QEMU_TB_EXEC
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
#else
/* Default definition: enter the generated prologue, which transfers
 * control into the translation block at tb_ptr.  */
# define tcg_qemu_tb_exec(env, tb_ptr) \
    ((uintptr_t (*)(void *, void *))tcg_ctx.code_gen_prologue)(env, tb_ptr)
#endif
1083
1084void tcg_register_jit(void *buf, size_t buf_size);
1085
1086/*
1087 * Memory helpers that will be used by TCG generated code.
1088 */
1089#ifdef CONFIG_SOFTMMU
1090/* Value zero-extended to tcg register size.  */
1091tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
1092                                     TCGMemOpIdx oi, uintptr_t retaddr);
1093tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1094                                    TCGMemOpIdx oi, uintptr_t retaddr);
1095tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1096                                    TCGMemOpIdx oi, uintptr_t retaddr);
1097uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
1098                           TCGMemOpIdx oi, uintptr_t retaddr);
1099tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1100                                    TCGMemOpIdx oi, uintptr_t retaddr);
1101tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1102                                    TCGMemOpIdx oi, uintptr_t retaddr);
1103uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
1104                           TCGMemOpIdx oi, uintptr_t retaddr);
1105
1106/* Value sign-extended to tcg register size.  */
1107tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1108                                     TCGMemOpIdx oi, uintptr_t retaddr);
1109tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1110                                    TCGMemOpIdx oi, uintptr_t retaddr);
1111tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1112                                    TCGMemOpIdx oi, uintptr_t retaddr);
1113tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1114                                    TCGMemOpIdx oi, uintptr_t retaddr);
1115tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1116                                    TCGMemOpIdx oi, uintptr_t retaddr);
1117
1118void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
1119                        TCGMemOpIdx oi, uintptr_t retaddr);
1120void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1121                       TCGMemOpIdx oi, uintptr_t retaddr);
1122void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1123                       TCGMemOpIdx oi, uintptr_t retaddr);
1124void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1125                       TCGMemOpIdx oi, uintptr_t retaddr);
1126void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1127                       TCGMemOpIdx oi, uintptr_t retaddr);
1128void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1129                       TCGMemOpIdx oi, uintptr_t retaddr);
1130void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1131                       TCGMemOpIdx oi, uintptr_t retaddr);
1132
/* NOTE(review): these _cmmu variants mirror the _mmu loads above but
 * return the exact-width value rather than a tcg_target_ulong; they
 * presumably serve code (translation-time) accesses -- confirm against
 * the softmmu helper implementations.  */
uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
1147
1148/* Temporary aliases until backends are converted.  */
1149#ifdef TARGET_WORDS_BIGENDIAN
1150# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
1151# define helper_ret_lduw_mmu  helper_be_lduw_mmu
1152# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
1153# define helper_ret_ldul_mmu  helper_be_ldul_mmu
1154# define helper_ret_ldl_mmu   helper_be_ldul_mmu
1155# define helper_ret_ldq_mmu   helper_be_ldq_mmu
1156# define helper_ret_stw_mmu   helper_be_stw_mmu
1157# define helper_ret_stl_mmu   helper_be_stl_mmu
1158# define helper_ret_stq_mmu   helper_be_stq_mmu
1159# define helper_ret_ldw_cmmu  helper_be_ldw_cmmu
1160# define helper_ret_ldl_cmmu  helper_be_ldl_cmmu
1161# define helper_ret_ldq_cmmu  helper_be_ldq_cmmu
1162#else
1163# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
1164# define helper_ret_lduw_mmu  helper_le_lduw_mmu
1165# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
1166# define helper_ret_ldul_mmu  helper_le_ldul_mmu
1167# define helper_ret_ldl_mmu   helper_le_ldul_mmu
1168# define helper_ret_ldq_mmu   helper_le_ldq_mmu
1169# define helper_ret_stw_mmu   helper_le_stw_mmu
1170# define helper_ret_stl_mmu   helper_le_stl_mmu
1171# define helper_ret_stq_mmu   helper_le_stq_mmu
1172# define helper_ret_ldw_cmmu  helper_le_ldw_cmmu
1173# define helper_ret_ldl_cmmu  helper_le_ldl_cmmu
1174# define helper_ret_ldq_cmmu  helper_le_ldq_cmmu
1175#endif
1176
1177#endif /* CONFIG_SOFTMMU */
1178
1179#endif /* TCG_H */
1180