qemu/tcg/tcg.h
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef TCG_H
#define TCG_H

#include "qemu-common.h"
#include "cpu.h"
#include "exec/tb-context.h"
#include "qemu/bitops.h"
#include "tcg-mo.h"
#include "tcg-target.h"

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define CPU_TEMP_BUF_NLONGS 128

/* Default target word size to pointer size.  */
#ifndef TCG_TARGET_REG_BITS
# if UINTPTR_MAX == UINT32_MAX
#  define TCG_TARGET_REG_BITS 32
# elif UINTPTR_MAX == UINT64_MAX
#  define TCG_TARGET_REG_BITS 64
# else
#  error Unknown pointer size for tcg target
# endif
#endif

#if TCG_TARGET_REG_BITS == 32
typedef int32_t tcg_target_long;
typedef uint32_t tcg_target_ulong;
#define TCG_PRIlx PRIx32
#define TCG_PRIld PRId32
#elif TCG_TARGET_REG_BITS == 64
typedef int64_t tcg_target_long;
typedef uint64_t tcg_target_ulong;
#define TCG_PRIlx PRIx64
#define TCG_PRIld PRId64
#else
#error unsupported
#endif

/* Oversized TCG guests make things like MTTCG hard
 * as we can't use atomics for cputlb updates.
 */
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
#define TCG_OVERSIZED_GUEST 1
#else
#define TCG_OVERSIZED_GUEST 0
#endif

#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
typedef uint64_t TCGRegSet;
#else
#error unsupported
#endif

#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros.  */
#define TCG_TARGET_HAS_extrl_i64_i32    0
#define TCG_TARGET_HAS_extrh_i64_i32    0
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#define TCG_TARGET_HAS_div2_i64         0
#define TCG_TARGET_HAS_rot_i64          0
#define TCG_TARGET_HAS_ext8s_i64        0
#define TCG_TARGET_HAS_ext16s_i64       0
#define TCG_TARGET_HAS_ext32s_i64       0
#define TCG_TARGET_HAS_ext8u_i64        0
#define TCG_TARGET_HAS_ext16u_i64       0
#define TCG_TARGET_HAS_ext32u_i64       0
#define TCG_TARGET_HAS_bswap16_i64      0
#define TCG_TARGET_HAS_bswap32_i64      0
#define TCG_TARGET_HAS_bswap64_i64      0
#define TCG_TARGET_HAS_neg_i64          0
#define TCG_TARGET_HAS_not_i64          0
#define TCG_TARGET_HAS_andc_i64         0
#define TCG_TARGET_HAS_orc_i64          0
#define TCG_TARGET_HAS_eqv_i64          0
#define TCG_TARGET_HAS_nand_i64         0
#define TCG_TARGET_HAS_nor_i64          0
#define TCG_TARGET_HAS_clz_i64          0
#define TCG_TARGET_HAS_ctz_i64          0
#define TCG_TARGET_HAS_ctpop_i64        0
#define TCG_TARGET_HAS_deposit_i64      0
#define TCG_TARGET_HAS_extract_i64      0
#define TCG_TARGET_HAS_sextract_i64     0
#define TCG_TARGET_HAS_movcond_i64      0
#define TCG_TARGET_HAS_add2_i64         0
#define TCG_TARGET_HAS_sub2_i64         0
#define TCG_TARGET_HAS_mulu2_i64        0
#define TCG_TARGET_HAS_muls2_i64        0
#define TCG_TARGET_HAS_muluh_i64        0
#define TCG_TARGET_HAS_mulsh_i64        0
/* Turn some undef macros into true macros.  */
#define TCG_TARGET_HAS_add2_i32         1
#define TCG_TARGET_HAS_sub2_i32         1
#endif

#ifndef TCG_TARGET_deposit_i32_valid
#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_deposit_i64_valid
#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i32_valid
#define TCG_TARGET_extract_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i64_valid
#define TCG_TARGET_extract_i64_valid(ofs, len) 1
#endif

/* Only one of DIV or DIV2 should be defined.  */
#if defined(TCG_TARGET_HAS_div_i32)
#define TCG_TARGET_HAS_div2_i32         0
#elif defined(TCG_TARGET_HAS_div2_i32)
#define TCG_TARGET_HAS_div_i32          0
#define TCG_TARGET_HAS_rem_i32          0
#endif
#if defined(TCG_TARGET_HAS_div_i64)
#define TCG_TARGET_HAS_div2_i64         0
#elif defined(TCG_TARGET_HAS_div2_i64)
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#endif

/* For 32-bit targets, some sort of unsigned widening multiply is required.  */
#if TCG_TARGET_REG_BITS == 32 \
    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
         || defined(TCG_TARGET_HAS_muluh_i32))
# error "Missing unsigned widening multiply"
#endif

#ifndef TARGET_INSN_START_EXTRA_WORDS
# define TARGET_INSN_START_WORDS 1
#else
# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
#endif

typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg-opc.h"
#undef DEF
    NB_OPS,
} TCGOpcode;

#define tcg_regset_set_reg(d, r)   ((d) |= (TCGRegSet)1 << (r))
#define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
#define tcg_regset_test_reg(d, r)  (((d) >> (r)) & 1)
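
/* Illustrative sketch (not itself part of the header): a TCGRegSet is just
 * a bitmask indexed by TCGReg, so typical use looks like
 *
 *     TCGRegSet set = 0;
 *     tcg_regset_set_reg(set, reg);            // mark reg as a member
 *     if (tcg_regset_test_reg(set, reg)) {
 *         tcg_regset_reset_reg(set, reg);      // ... and drop it again
 *     }
 */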

#ifndef TCG_TARGET_INSN_UNIT_SIZE
# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
#elif TCG_TARGET_INSN_UNIT_SIZE == 1
typedef uint8_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 2
typedef uint16_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 4
typedef uint32_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 8
typedef uint64_t tcg_insn_unit;
#else
/* The port better have done this.  */
#endif

#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
# define tcg_debug_assert(X) do { assert(X); } while (0)
#elif QEMU_GNUC_PREREQ(4, 5)
# define tcg_debug_assert(X) \
    do { if (!(X)) { __builtin_unreachable(); } } while (0)
#else
# define tcg_debug_assert(X) do { (void)(X); } while (0)
#endif

typedef struct TCGRelocation {
    struct TCGRelocation *next;
    int type;
    tcg_insn_unit *ptr;
    intptr_t addend;
} TCGRelocation;

typedef struct TCGLabel {
    unsigned has_value : 1;
    unsigned id : 31;
    union {
        uintptr_t value;
        tcg_insn_unit *value_ptr;
        TCGRelocation *first_reloc;
    } u;
} TCGLabel;

typedef struct TCGPool {
    struct TCGPool *next;
    int size;
    uint8_t data[0] __attribute__ ((aligned));
} TCGPool;

#define TCG_POOL_CHUNK_SIZE 32768

#define TCG_MAX_TEMPS 512
#define TCG_MAX_INSNS 512

/* when the size of the arguments of a called function is smaller than
   this value, they are statically allocated in the TB stack frame */
#define TCG_STATIC_CALL_ARGS_SIZE 128

typedef enum TCGType {
    TCG_TYPE_I32,
    TCG_TYPE_I64,
    TCG_TYPE_COUNT, /* number of different types */

    /* An alias for the size of the host register.  */
#if TCG_TARGET_REG_BITS == 32
    TCG_TYPE_REG = TCG_TYPE_I32,
#else
    TCG_TYPE_REG = TCG_TYPE_I64,
#endif

    /* An alias for the size of the native pointer.  */
#if UINTPTR_MAX == UINT32_MAX
    TCG_TYPE_PTR = TCG_TYPE_I32,
#else
    TCG_TYPE_PTR = TCG_TYPE_I64,
#endif

    /* An alias for the size of the target "long", aka register.  */
#if TARGET_LONG_BITS == 64
    TCG_TYPE_TL = TCG_TYPE_I64,
#else
    TCG_TYPE_TL = TCG_TYPE_I32,
#endif
} TCGType;

/* Constants for qemu_ld and qemu_st for the Memory Operation field.  */
typedef enum TCGMemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_SIZE  = 3,   /* Mask for the above.  */

    MO_SIGN  = 4,   /* Sign-extended, otherwise zero-extended.  */

    MO_BSWAP = 8,   /* Host reverse endian.  */
#ifdef HOST_WORDS_BIGENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif
#ifdef TARGET_WORDS_BIGENDIAN
    MO_TE    = MO_BE,
#else
    MO_TE    = MO_LE,
#endif

    /* MO_UNALN accesses are never checked for alignment.
     * MO_ALIGN accesses will result in a call to the CPU's
     * do_unaligned_access hook if the guest address is not aligned.
     * The default depends on whether the target CPU defines ALIGNED_ONLY.
     *
     * Some architectures (e.g. ARMv8) need the address which is aligned
     * to a size more than the size of the memory access.
     * Some architectures (e.g. SPARCv9) need an address which is aligned,
     * but less strictly than the natural alignment.
     *
     * MO_ALIGN supposes the alignment size is the size of a memory access.
     *
     * There are three options:
     * - unaligned access permitted (MO_UNALN).
     * - an alignment to the size of an access (MO_ALIGN);
     * - an alignment to a specified size, which may be more or less than
     *   the access size (MO_ALIGN_x where 'x' is a size in bytes);
     */
    MO_ASHIFT = 4,
    MO_AMASK = 7 << MO_ASHIFT,
#ifdef ALIGNED_ONLY
    MO_ALIGN = 0,
    MO_UNALN = MO_AMASK,
#else
    MO_ALIGN = MO_AMASK,
    MO_UNALN = 0,
#endif
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_8  = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_ALIGN_32 = 5 << MO_ASHIFT,
    MO_ALIGN_64 = 6 << MO_ASHIFT,

    /* Combinations of the above, for ease of use.  */
    MO_UB    = MO_8,
    MO_UW    = MO_16,
    MO_UL    = MO_32,
    MO_SB    = MO_SIGN | MO_8,
    MO_SW    = MO_SIGN | MO_16,
    MO_SL    = MO_SIGN | MO_32,
    MO_Q     = MO_64,

    MO_LEUW  = MO_LE | MO_UW,
    MO_LEUL  = MO_LE | MO_UL,
    MO_LESW  = MO_LE | MO_SW,
    MO_LESL  = MO_LE | MO_SL,
    MO_LEQ   = MO_LE | MO_Q,

    MO_BEUW  = MO_BE | MO_UW,
    MO_BEUL  = MO_BE | MO_UL,
    MO_BESW  = MO_BE | MO_SW,
    MO_BESL  = MO_BE | MO_SL,
    MO_BEQ   = MO_BE | MO_Q,

    MO_TEUW  = MO_TE | MO_UW,
    MO_TEUL  = MO_TE | MO_UL,
    MO_TESW  = MO_TE | MO_SW,
    MO_TESL  = MO_TE | MO_SL,
    MO_TEQ   = MO_TE | MO_Q,

    MO_SSIZE = MO_SIZE | MO_SIGN,
} TCGMemOp;

/**
 * get_alignment_bits
 * @memop: TCGMemOp value
 *
 * Extract the alignment size from the memop.
 */
static inline unsigned get_alignment_bits(TCGMemOp memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        /* No alignment required.  */
        a = 0;
    } else if (a == MO_ALIGN) {
        /* A natural alignment requirement.  */
        a = memop & MO_SIZE;
    } else {
        /* A specific alignment requirement.  */
        a = a >> MO_ASHIFT;
    }
#if defined(CONFIG_SOFTMMU)
    /* The requested alignment cannot overlap the TLB flags.  */
    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
#endif
    return a;
}
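
/* Worked examples (assuming a target without ALIGNED_ONLY, so that
 * MO_ALIGN == MO_AMASK and MO_UNALN == 0):
 *
 *     get_alignment_bits(MO_32 | MO_UNALN)   == 0    no requirement
 *     get_alignment_bits(MO_32 | MO_ALIGN)   == 2    natural, 4-byte
 *     get_alignment_bits(MO_32 | MO_ALIGN_8) == 3    explicit, 8-byte
 *
 * The result is log2 of the required alignment in bytes.
 */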

typedef tcg_target_ulong TCGArg;

/* Define type and accessor macros for TCG variables.

   TCG variables are the inputs and outputs of TCG ops, as described
   in tcg/README. Target CPU front-end code uses these types to deal
   with TCG variables as it emits TCG code via the tcg_gen_* functions.
   They come in several flavours:
    * TCGv_i32 : 32 bit integer type
    * TCGv_i64 : 64 bit integer type
    * TCGv_ptr : a host pointer type
    * TCGv : an integer type the same size as target_ulong
             (an alias for either TCGv_i32 or TCGv_i64)
   The compiler's type checking will complain if you mix them
   up and pass the wrong sized TCGv to a function.

   Users of tcg_gen_* don't need to know about any of the internal
   details of these, and should treat them as opaque types.
   You won't be able to look inside them in a debugger either.

   Internal implementation details follow:

   Note that there is no definition of the structs TCGv_i32_d etc anywhere.
   This is deliberate, because the values we store in variables of type
   TCGv_i32 are not really pointers-to-structures. They're just small
   integers, but keeping them in pointer types like this means that the
   compiler will complain if you accidentally pass a TCGv_i32 to a
   function which takes a TCGv_i64, and so on. Only the internals of
   TCG need to care about the actual contents of the types.  */

typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
typedef struct TCGv_ptr_d *TCGv_ptr;
typedef TCGv_ptr TCGv_env;
#if TARGET_LONG_BITS == 32
#define TCGv TCGv_i32
#elif TARGET_LONG_BITS == 64
#define TCGv TCGv_i64
#else
#error Unhandled TARGET_LONG_BITS value
#endif
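
/* A minimal frontend sketch, for orientation only; the tcg_gen_* emitters
 * themselves are declared in tcg-op.h, not here:
 *
 *     TCGv_i32 tmp = tcg_temp_new_i32();    // allocate a 32-bit temporary
 *     tcg_gen_movi_i32(tmp, 0x1234);        // emit "tmp = 0x1234"
 *     tcg_temp_free_i32(tmp);               // release the temporary
 *
 * Passing tmp to a function that expects a TCGv_i64 fails to compile,
 * which is exactly what the distinct pointer types above are for.
 */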

/* See the comment before tcgv_i32_temp.  */
#define TCGV_UNUSED_I32(x) (x = (TCGv_i32)NULL)
#define TCGV_UNUSED_I64(x) (x = (TCGv_i64)NULL)
#define TCGV_UNUSED_PTR(x) (x = (TCGv_ptr)NULL)

#define TCGV_IS_UNUSED_I32(x) ((x) == (TCGv_i32)NULL)
#define TCGV_IS_UNUSED_I64(x) ((x) == (TCGv_i64)NULL)
#define TCGV_IS_UNUSED_PTR(x) ((x) == (TCGv_ptr)NULL)

/* call flags */
/* Helper does not read globals (either directly or through an exception). It
   implies TCG_CALL_NO_WRITE_GLOBALS. */
#define TCG_CALL_NO_READ_GLOBALS    0x0010
/* Helper does not write globals */
#define TCG_CALL_NO_WRITE_GLOBALS   0x0020
/* Helper can be safely suppressed if the return value is not used. */
#define TCG_CALL_NO_SIDE_EFFECTS    0x0040

/* convenience version of most used call flags */
#define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
#define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
#define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
#define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
#define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)
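
/* These flags are normally attached where a helper is declared, for example
 * via the DEF_HELPER_FLAGS_* macros used by tcg-runtime.h and the per-target
 * helper.h files.  An illustrative declaration:
 *
 *     DEF_HELPER_FLAGS_2(clz_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32)
 *
 * tells the optimizer that the helper touches no globals and may be
 * deleted outright if its result is unused.
 */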

/* Used to align parameters.  See the comment before tcgv_i32_temp.  */
#define TCG_CALL_DUMMY_ARG      ((TCGArg)0)

/* Conditions.  Note that these are laid out for easy manipulation by
   the functions below:
     bit 0 is used for inverting;
     bit 1 is signed,
     bit 2 is unsigned,
     bit 3 is used with bit 0 for swapping signed/unsigned.  */
typedef enum {
    /* non-signed */
    TCG_COND_NEVER  = 0 | 0 | 0 | 0,
    TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
    TCG_COND_EQ     = 8 | 0 | 0 | 0,
    TCG_COND_NE     = 8 | 0 | 0 | 1,
    /* signed */
    TCG_COND_LT     = 0 | 0 | 2 | 0,
    TCG_COND_GE     = 0 | 0 | 2 | 1,
    TCG_COND_LE     = 8 | 0 | 2 | 0,
    TCG_COND_GT     = 8 | 0 | 2 | 1,
    /* unsigned */
    TCG_COND_LTU    = 0 | 4 | 0 | 0,
    TCG_COND_GEU    = 0 | 4 | 0 | 1,
    TCG_COND_LEU    = 8 | 4 | 0 | 0,
    TCG_COND_GTU    = 8 | 4 | 0 | 1,
} TCGCond;

/* Invert the sense of the comparison.  */
static inline TCGCond tcg_invert_cond(TCGCond c)
{
    return (TCGCond)(c ^ 1);
}

/* Swap the operands in a comparison.  */
static inline TCGCond tcg_swap_cond(TCGCond c)
{
    return c & 6 ? (TCGCond)(c ^ 9) : c;
}

/* Create an "unsigned" version of a "signed" comparison.  */
static inline TCGCond tcg_unsigned_cond(TCGCond c)
{
    return c & 2 ? (TCGCond)(c ^ 6) : c;
}

/* Must a comparison be considered unsigned?  */
static inline bool is_unsigned_cond(TCGCond c)
{
    return (c & 4) != 0;
}

/* Create a "high" version of a double-word comparison.
   This removes equality from a LTE or GTE comparison.  */
static inline TCGCond tcg_high_cond(TCGCond c)
{
    switch (c) {
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
        return (TCGCond)(c ^ 8);
    default:
        return c;
    }
}
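
/* Worked examples of the bit manipulation above, following the enum layout:
 *
 *     tcg_invert_cond(TCG_COND_LT)   == TCG_COND_GE     flip bit 0
 *     tcg_swap_cond(TCG_COND_LT)     == TCG_COND_GT     a < b  <=>  b > a
 *     tcg_unsigned_cond(TCG_COND_LT) == TCG_COND_LTU    signed -> unsigned
 *     tcg_high_cond(TCG_COND_LE)     == TCG_COND_LT     equality dropped
 *     is_unsigned_cond(TCG_COND_GEU) == true
 */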

typedef enum TCGTempVal {
    TEMP_VAL_DEAD,
    TEMP_VAL_REG,
    TEMP_VAL_MEM,
    TEMP_VAL_CONST,
} TCGTempVal;

typedef struct TCGTemp {
    TCGReg reg:8;
    TCGTempVal val_type:8;
    TCGType base_type:8;
    TCGType type:8;
    unsigned int fixed_reg:1;
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
    unsigned int mem_coherent:1;
    unsigned int mem_allocated:1;
    /* If true, the temp is saved across both basic blocks and
       translation blocks.  */
    unsigned int temp_global:1;
    /* If true, the temp is saved across basic blocks but dead
       at the end of translation blocks.  If false, the temp is
       dead at the end of basic blocks.  */
    unsigned int temp_local:1;
    unsigned int temp_allocated:1;

    tcg_target_long val;
    struct TCGTemp *mem_base;
    intptr_t mem_offset;
    const char *name;

    /* Pass-specific information that can be stored for a temporary.
       One word worth of integer data, and one pointer to data
       allocated separately.  */
    uintptr_t state;
    void *state_ptr;
} TCGTemp;

typedef struct TCGContext TCGContext;

typedef struct TCGTempSet {
    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;

/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
   this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
   There are never more than 2 outputs, which means that we can store all
   dead + sync data within 16 bits.  */
#define DEAD_ARG  4
#define SYNC_ARG  1
typedef uint16_t TCGLifeData;

/* The layout here is designed to avoid a bitfield crossing of
   a 32-bit boundary, which would cause GCC to add extra padding.  */
typedef struct TCGOp {
    TCGOpcode opc   : 8;        /*  8 */

    /* The number of out and in parameters for a call.  */
    unsigned calli  : 4;        /* 12 */
    unsigned callo  : 2;        /* 14 */
    unsigned        : 2;        /* 16 */

    /* Index of the prev/next op, or 0 for the end of the list.  */
    unsigned prev   : 16;       /* 32 */
    unsigned next   : 16;       /* 48 */

    /* Lifetime data of the operands.  */
    unsigned life   : 16;       /* 64 */

    /* Arguments for the opcode.  */
    TCGArg args[MAX_OPC_PARAM];
} TCGOp;

/* Make sure that we don't expand the structure without noticing.  */
QEMU_BUILD_BUG_ON(sizeof(TCGOp) != 8 + sizeof(TCGArg) * MAX_OPC_PARAM);

/* Make sure operands fit in the bitfields above.  */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
QEMU_BUILD_BUG_ON(OPC_BUF_SIZE > (1 << 16));

typedef struct TCGProfile {
    int64_t tb_count1;
    int64_t tb_count;
    int64_t op_count; /* total insn count */
    int op_count_max; /* max insn per TB */
    int64_t temp_count;
    int temp_count_max;
    int64_t del_op_count;
    int64_t code_in_len;
    int64_t code_out_len;
    int64_t search_out_len;
    int64_t interm_time;
    int64_t code_time;
    int64_t la_time;
    int64_t opt_time;
    int64_t restore_count;
    int64_t restore_time;
    int64_t table_op_count[NB_OPS];
} TCGProfile;

struct TCGContext {
    uint8_t *pool_cur, *pool_end;
    TCGPool *pool_first, *pool_current, *pool_first_large;
    int nb_labels;
    int nb_globals;
    int nb_temps;
    int nb_indirects;

    /* goto_tb support */
    tcg_insn_unit *code_buf;
    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
    uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */

    TCGRegSet reserved_regs;
    uint32_t tb_cflags; /* cflags of the current TB */
    intptr_t current_frame_offset;
    intptr_t frame_start;
    intptr_t frame_end;
    TCGTemp *frame_temp;

    tcg_insn_unit *code_ptr;

#ifdef CONFIG_PROFILER
    TCGProfile prof;
#endif

#ifdef CONFIG_DEBUG_TCG
    int temps_in_use;
    int goto_tb_issue_mask;
#endif

    int gen_next_op_idx;

    /* Code generation.  Note that we specifically do not use tcg_insn_unit
       here, because there's too much arithmetic throughout that relies
       on addition and subtraction working on bytes.  Rely on the GCC
       extension that allows arithmetic on void*.  */
    void *code_gen_prologue;
    void *code_gen_epilogue;
    void *code_gen_buffer;
    size_t code_gen_buffer_size;
    void *code_gen_ptr;
    void *data_gen_ptr;

    /* Threshold to flush the translated code buffer.  */
    void *code_gen_highwater;

    /* Track which vCPU triggers events */
    CPUState *cpu;                      /* *_trans */

    /* These structures are private to tcg-target.inc.c.  */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    struct TCGLabelQemuLdst *ldst_labels;
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    struct TCGLabelPoolData *pool_labels;
#endif

    TCGLabel *exitreq_label;

    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

    /* Tells which temporary holds a given register.
       It does not take into account fixed registers */
    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];

    TCGOp gen_op_buf[OPC_BUF_SIZE];

    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
};

extern TCGContext tcg_init_ctx;
extern __thread TCGContext *tcg_ctx;
extern TCGv_env cpu_env;

static inline size_t temp_idx(TCGTemp *ts)
{
    ptrdiff_t n = ts - tcg_ctx->temps;
    tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
    return n;
}

static inline TCGArg temp_arg(TCGTemp *ts)
{
    return (uintptr_t)ts;
}

static inline TCGTemp *arg_temp(TCGArg a)
{
    return (TCGTemp *)(uintptr_t)a;
}

/* Using the offset of a temporary, relative to TCGContext, rather than
   its index means that we don't use 0.  That leaves offset 0 free for
   a NULL representation without having to leave index 0 unused.  */
static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
{
    uintptr_t o = (uintptr_t)v;
    TCGTemp *t = (void *)tcg_ctx + o;
    tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
    return t;
}

static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
{
    return temp_arg(tcgv_i32_temp(v));
}

static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
{
    return temp_arg(tcgv_i64_temp(v));
}

static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
{
    return temp_arg(tcgv_ptr_temp(v));
}

static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
{
    (void)temp_idx(t); /* trigger embedded assert */
    return (TCGv_i32)((void *)t - (void *)tcg_ctx);
}

static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
{
    return (TCGv_i64)temp_tcgv_i32(t);
}

static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
{
    return (TCGv_ptr)temp_tcgv_i32(t);
}
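
/* Sanity sketch of the round-trip described above: a TCGv_i32 is really
 * the byte offset of its TCGTemp within *tcg_ctx, so
 *
 *     TCGTemp *t = tcgv_i32_temp(v);    // offset -> TCGTemp pointer
 *     assert(temp_tcgv_i32(t) == v);    // ... and back, losslessly
 *
 * and likewise for the _i64 and _ptr variants, which share the same
 * representation.
 */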

#if TCG_TARGET_REG_BITS == 32
static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t));
}

static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
}
#endif

static inline void tcg_set_insn_param(int op_idx, int arg, TCGArg v)
{
    tcg_ctx->gen_op_buf[op_idx].args[arg] = v;
}

static inline void tcg_set_insn_start_param(int op_idx, int arg, target_ulong v)
{
#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    tcg_set_insn_param(op_idx, arg, v);
#else
    tcg_set_insn_param(op_idx, arg * 2, v);
    tcg_set_insn_param(op_idx, arg * 2 + 1, v >> 32);
#endif
}

/* The number of opcodes emitted so far.  */
static inline int tcg_op_buf_count(void)
{
    return tcg_ctx->gen_next_op_idx;
}

/* Test for whether to terminate the TB for using too many opcodes.  */
static inline bool tcg_op_buf_full(void)
{
    return tcg_op_buf_count() >= OPC_MAX_SIZE;
}

/* pool based memory allocation */

/* user-mode: tb_lock must be held for tcg_malloc_internal. */
void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
TranslationBlock *tcg_tb_alloc(TCGContext *s);

void tcg_region_init(void);
void tcg_region_reset_all(void);

size_t tcg_code_size(void);
size_t tcg_code_capacity(void);

/* user-mode: Called with tb_lock held.  */
static inline void *tcg_malloc(int size)
{
    TCGContext *s = tcg_ctx;
    uint8_t *ptr, *ptr_end;

    /* ??? This is a weak placeholder for minimum malloc alignment.  */
    size = QEMU_ALIGN_UP(size, 8);

    ptr = s->pool_cur;
    ptr_end = ptr + size;
    if (unlikely(ptr_end > s->pool_end)) {
        return tcg_malloc_internal(tcg_ctx, size);
    } else {
        s->pool_cur = ptr_end;
        return ptr;
    }
}
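
/* Usage sketch: pool allocations are never freed individually; everything
 * handed out here is reclaimed in bulk by tcg_pool_reset() between
 * translations.  So a call site is simply (MyPassData being hypothetical):
 *
 *     struct MyPassData *d = tcg_malloc(sizeof(*d));
 *     d->field = ...;    // use it; no matching free is needed
 */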

void tcg_context_init(TCGContext *s);
void tcg_register_thread(void);
void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);

int tcg_gen_code(TCGContext *s, TranslationBlock *tb);

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);

TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
                                     intptr_t, const char *);

TCGv_i32 tcg_temp_new_internal_i32(int temp_local);
TCGv_i64 tcg_temp_new_internal_i64(int temp_local);

void tcg_temp_free_i32(TCGv_i32 arg);
void tcg_temp_free_i64(TCGv_i64 arg);

static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
    return temp_tcgv_i32(t);
}

static inline TCGv_i32 tcg_temp_new_i32(void)
{
    return tcg_temp_new_internal_i32(0);
}

static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
    return tcg_temp_new_internal_i32(1);
}

static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
    return temp_tcgv_i64(t);
}

static inline TCGv_i64 tcg_temp_new_i64(void)
{
    return tcg_temp_new_internal_i64(0);
}

static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
    return tcg_temp_new_internal_i64(1);
}

#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of
 * code which is not supposed to leak any TCG temporaries, then
 * calling tcg_check_temp_count() at the end of the section will
 * return 1 if the section did in fact leak a temporary.
 */
void tcg_clear_temp_count(void);
int tcg_check_temp_count(void);
#else
#define tcg_clear_temp_count() do { } while (0)
#define tcg_check_temp_count() 0
#endif
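
/* Typical debug-build usage, bracketing one guest instruction:
 *
 *     tcg_clear_temp_count();
 *     ... translate the instruction ...
 *     if (tcg_check_temp_count()) {
 *         // a temporary was allocated but never freed
 *     }
 */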

void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf);
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf);

#define TCG_CT_ALIAS  0x80
#define TCG_CT_IALIAS 0x40
#define TCG_CT_NEWREG 0x20 /* output requires a new register */
#define TCG_CT_REG    0x01
#define TCG_CT_CONST  0x02 /* any constant of register size */

typedef struct TCGArgConstraint {
    uint16_t ct;
    uint8_t alias_index;
    union {
        TCGRegSet regs;
    } u;
} TCGArgConstraint;

#define TCG_MAX_OP_ARGS 16

/* Bits for TCGOpDef->flags, 8 bits available.  */
enum {
    /* Instruction defines the end of a basic block.  */
    TCG_OPF_BB_END       = 0x01,
    /* Instruction clobbers call registers and potentially updates globals.  */
    TCG_OPF_CALL_CLOBBER = 0x02,
    /* Instruction has side effects: it cannot be removed if its outputs
       are not used, and might trigger exceptions.  */
    TCG_OPF_SIDE_EFFECTS = 0x04,
    /* Instruction operands are 64-bits (otherwise 32-bits).  */
    TCG_OPF_64BIT        = 0x08,
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemented by the host.  */
    TCG_OPF_NOT_PRESENT  = 0x10,
};

typedef struct TCGOpDef {
    const char *name;
    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
    uint8_t flags;
    TCGArgConstraint *args_ct;
    int *sorted_args;
#if defined(CONFIG_DEBUG_TCG)
    int used;
#endif
} TCGOpDef;

extern TCGOpDef tcg_op_defs[];
extern const size_t tcg_op_defs_max;

typedef struct TCGTargetOpDef {
    TCGOpcode op;
    const char *args_ct_str[TCG_MAX_OP_ARGS];
} TCGTargetOpDef;

#define tcg_abort() \
do {\
    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
    abort();\
} while (0)

#if UINTPTR_MAX == UINT32_MAX
static inline TCGv_ptr TCGV_NAT_TO_PTR(TCGv_i32 n) { return (TCGv_ptr)n; }
static inline TCGv_i32 TCGV_PTR_TO_NAT(TCGv_ptr n) { return (TCGv_i32)n; }

#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((intptr_t)(V)))
#define tcg_global_mem_new_ptr(R, O, N) \
    TCGV_NAT_TO_PTR(tcg_global_mem_new_i32((R), (O), (N)))
#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i32())
#define tcg_temp_free_ptr(T) tcg_temp_free_i32(TCGV_PTR_TO_NAT(T))
#else
static inline TCGv_ptr TCGV_NAT_TO_PTR(TCGv_i64 n) { return (TCGv_ptr)n; }
static inline TCGv_i64 TCGV_PTR_TO_NAT(TCGv_ptr n) { return (TCGv_i64)n; }

#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((intptr_t)(V)))
#define tcg_global_mem_new_ptr(R, O, N) \
    TCGV_NAT_TO_PTR(tcg_global_mem_new_i64((R), (O), (N)))
#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i64())
#define tcg_temp_free_ptr(T) tcg_temp_free_i64(TCGV_PTR_TO_NAT(T))
#endif

bool tcg_op_supported(TCGOpcode op);

void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);

void tcg_op_remove(TCGContext *s, TCGOp *op);
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);

void tcg_optimize(TCGContext *s);

/* only used for debugging purposes */
void tcg_dump_ops(TCGContext *s);

TCGv_i32 tcg_const_i32(int32_t val);
TCGv_i64 tcg_const_i64(int64_t val);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);

TCGLabel *gen_new_label(void);

/**
 * label_arg
 * @l: label
 *
 * Encode a label for storage in the TCG opcode stream.
 */

static inline TCGArg label_arg(TCGLabel *l)
{
    return (uintptr_t)l;
}

/**
 * arg_label
 * @i: value
 *
 * The opposite of label_arg.  Retrieve a label from the
 * encoding of the TCG opcode stream.
 */

static inline TCGLabel *arg_label(TCGArg i)
{
    return (TCGLabel *)(uintptr_t)i;
}

/**
 * tcg_ptr_byte_diff
 * @a, @b: addresses to be differenced
 *
 * There are many places within the TCG backends where we need a byte
 * difference between two pointers.  While this can be accomplished
 * with local casting, it's easy to get wrong -- especially if one is
 * concerned with the signedness of the result.
 *
 * This version relies on GCC's void pointer arithmetic to get the
 * correct result.
 */

static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
{
    return a - b;
}

/**
 * tcg_pcrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a pc-relative difference, from the current code_ptr
 * to the destination address.
 */

static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
{
    return tcg_ptr_byte_diff(target, s->code_ptr);
}

/**
 * tcg_current_code_size
 * @s: the tcg context
 *
 * Compute the current code size within the translation block.
 * This is used to fill in qemu's data structures for goto_tb.
 */

static inline size_t tcg_current_code_size(TCGContext *s)
{
    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}

/* Combine the TCGMemOp and mmu_idx parameters into a single value.  */
typedef uint32_t TCGMemOpIdx;

/**
 * make_memop_idx
 * @op: memory operation
 * @idx: mmu index
 *
 * Encode these values into a single parameter.
 */
static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
{
    tcg_debug_assert(idx <= 15);
    return (op << 4) | idx;
}

/**
 * get_memop
 * @oi: combined op/idx parameter
 *
 * Extract the memory operation from the combined value.
 */
static inline TCGMemOp get_memop(TCGMemOpIdx oi)
{
    return oi >> 4;
}

/**
 * get_mmuidx
 * @oi: combined op/idx parameter
 *
 * Extract the mmu index from the combined value.
 */
static inline unsigned get_mmuidx(TCGMemOpIdx oi)
{
    return oi & 15;
}
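
/* Round-trip sketch: the op and the mmu index pack losslessly, e.g. for
 * a target-endian 32-bit load through mmu index 1:
 *
 *     TCGMemOpIdx oi = make_memop_idx(MO_TEUL, 1);
 *     assert(get_memop(oi) == MO_TEUL);
 *     assert(get_mmuidx(oi) == 1);
 */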

/**
 * tcg_qemu_tb_exec:
 * @env: pointer to CPUArchState for the CPU
 * @tb_ptr: address of generated code for the TB to execute
 *
 * Start executing code from a given translation block.
 * Where translation blocks have been linked, execution
 * may proceed from the given TB into successive ones.
 * Control eventually returns only when some action is needed
 * from the top-level loop: either control must pass to a TB
 * which has not yet been directly linked, or an asynchronous
 * event such as an interrupt needs handling.
 *
 * Return: The return value is the value passed to the corresponding
 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
 * The value is either zero or a 4-byte aligned pointer to that TB combined
 * with additional information in its two least significant bits. The
 * additional information is encoded as follows:
 *  0, 1: the link between this TB and the next is via the specified
 *        TB index (0 or 1). That is, we left the TB via (the equivalent
 *        of) "goto_tb <index>". The main loop uses this to determine
 *        how to link the TB just executed to the next.
 *  2:    we are using instruction counting code generation, and we
 *        did not start executing this TB because the instruction counter
 *        would hit zero midway through it. In this case the pointer
 *        returned is the TB we were about to execute, and the caller must
 *        arrange to execute the remaining count of instructions.
 *  3:    we stopped because the CPU's exit_request flag was set
 *        (usually meaning that there is an interrupt that needs to be
 *        handled). The pointer returned is the TB we were about to execute
 *        when we noticed the pending exit request.
 *
 * If the bottom two bits indicate an exit-via-index then the CPU
 * state is correctly synchronised and ready for execution of the next
 * TB (and in particular the guest PC is the address to execute next).
 * Otherwise, we gave up on execution of this TB before it started, and
 * the caller must fix up the CPU state by calling the CPU's
 * synchronize_from_tb() method with the TB pointer we return (falling
 * back to calling the CPU's set_pc method with tb->pc if no
 * synchronize_from_tb() method exists).
 *
 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
 * to this default (which just calls the prologue.code emitted by
 * tcg_target_qemu_prologue()).
 */
#define TB_EXIT_MASK 3
#define TB_EXIT_IDX0 0
#define TB_EXIT_IDX1 1
#define TB_EXIT_REQUESTED 3
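
/* Decoding sketch for the return value, per the description above (this
 * mirrors what the main execution loop does with it):
 *
 *     uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
 *     TranslationBlock *last_tb = (void *)(ret & ~TB_EXIT_MASK);
 *     int exit_reason = ret & TB_EXIT_MASK;    // IDX0, IDX1, 2 or REQUESTED
 */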

#ifdef HAVE_TCG_QEMU_TB_EXEC
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
#else
# define tcg_qemu_tb_exec(env, tb_ptr) \
    ((uintptr_t (*)(void *, void *))tcg_ctx->code_gen_prologue)(env, tb_ptr)
#endif

void tcg_register_jit(void *buf, size_t buf_size);

/*
 * Memory helpers that will be used by TCG generated code.
 */
#ifdef CONFIG_SOFTMMU
/* Value zero-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);

/* Value sign-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);

uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);

/* Temporary aliases until backends are converted.  */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
# define helper_ret_lduw_mmu  helper_be_lduw_mmu
# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
# define helper_ret_ldul_mmu  helper_be_ldul_mmu
# define helper_ret_ldl_mmu   helper_be_ldul_mmu
# define helper_ret_ldq_mmu   helper_be_ldq_mmu
# define helper_ret_stw_mmu   helper_be_stw_mmu
# define helper_ret_stl_mmu   helper_be_stl_mmu
# define helper_ret_stq_mmu   helper_be_stq_mmu
# define helper_ret_ldw_cmmu  helper_be_ldw_cmmu
# define helper_ret_ldl_cmmu  helper_be_ldl_cmmu
# define helper_ret_ldq_cmmu  helper_be_ldq_cmmu
#else
# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
# define helper_ret_lduw_mmu  helper_le_lduw_mmu
# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
# define helper_ret_ldul_mmu  helper_le_ldul_mmu
# define helper_ret_ldl_mmu   helper_le_ldul_mmu
# define helper_ret_ldq_mmu   helper_le_ldq_mmu
# define helper_ret_stw_mmu   helper_le_stw_mmu
# define helper_ret_stl_mmu   helper_le_stl_mmu
# define helper_ret_stq_mmu   helper_le_stq_mmu
# define helper_ret_ldw_cmmu  helper_le_ldw_cmmu
# define helper_ret_ldl_cmmu  helper_le_ldl_cmmu
# define helper_ret_ldq_cmmu  helper_le_ldq_cmmu
#endif

uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);

#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu         \
    (CPUArchState *env, target_ulong addr, TYPE val,  \
     TCGMemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif

GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER
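
/* For reference, one expansion of the declaration macros above, e.g.
 * GEN_ATOMIC_HELPER(fetch_add, uint32_t, l_le), is simply:
 *
 *     uint32_t helper_atomic_fetch_addl_le_mmu(CPUArchState *env,
 *                                              target_ulong addr, uint32_t val,
 *                                              TCGMemOpIdx oi, uintptr_t retaddr);
 */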
#endif /* CONFIG_SOFTMMU */

#ifdef CONFIG_ATOMIC128
#include "qemu/int128.h"

/* These aren't really "proper" helpers, because TCG cannot manage Int128.
   However, use the same format as the others, for use by the backends. */
Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);

Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);

#endif /* CONFIG_ATOMIC128 */

#endif /* TCG_H */