qemu/tcg/tcg.h
   1/*
   2 * Tiny Code Generator for QEMU
   3 *
   4 * Copyright (c) 2008 Fabrice Bellard
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a copy
   7 * of this software and associated documentation files (the "Software"), to deal
   8 * in the Software without restriction, including without limitation the rights
   9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10 * copies of the Software, and to permit persons to whom the Software is
  11 * furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22 * THE SOFTWARE.
  23 */
  24
  25#ifndef TCG_H
  26#define TCG_H
  27
  28#include "qemu-common.h"
  29#include "cpu.h"
  30#include "exec/tb-context.h"
  31#include "qemu/bitops.h"
  32#include "qemu/queue.h"
  33#include "tcg-mo.h"
  34#include "tcg-target.h"
  35
  36/* XXX: make safe guess about sizes */
  37#define MAX_OP_PER_INSTR 266
  38
  39#if HOST_LONG_BITS == 32
  40#define MAX_OPC_PARAM_PER_ARG 2
  41#else
  42#define MAX_OPC_PARAM_PER_ARG 1
  43#endif
  44#define MAX_OPC_PARAM_IARGS 6
  45#define MAX_OPC_PARAM_OARGS 1
  46#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
  47
  48/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
  49 * and up to 4 + N parameters on 64-bit archs
  50 * (N = number of input arguments + output arguments).  */
  51#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
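     /* Worked example, derived from the definitions above: on a 64-bit host
      * MAX_OPC_PARAM_PER_ARG is 1, so MAX_OPC_PARAM = 4 + 1 * (6 + 1) = 11;
      * on a 32-bit host each 64-bit argument needs two words, giving
      * MAX_OPC_PARAM = 4 + 2 * (6 + 1) = 18.  */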
  52
  53#define CPU_TEMP_BUF_NLONGS 128
  54
  55/* Default target word size to pointer size.  */
  56#ifndef TCG_TARGET_REG_BITS
  57# if UINTPTR_MAX == UINT32_MAX
  58#  define TCG_TARGET_REG_BITS 32
  59# elif UINTPTR_MAX == UINT64_MAX
  60#  define TCG_TARGET_REG_BITS 64
  61# else
  62#  error Unknown pointer size for tcg target
  63# endif
  64#endif
  65
  66#if TCG_TARGET_REG_BITS == 32
  67typedef int32_t tcg_target_long;
  68typedef uint32_t tcg_target_ulong;
  69#define TCG_PRIlx PRIx32
  70#define TCG_PRIld PRId32
  71#elif TCG_TARGET_REG_BITS == 64
  72typedef int64_t tcg_target_long;
  73typedef uint64_t tcg_target_ulong;
  74#define TCG_PRIlx PRIx64
  75#define TCG_PRIld PRId64
  76#else
  77#error unsupported
  78#endif
  79
  80/* Oversized TCG guests make things like MTTCG hard
  81 * as we can't use atomics for cputlb updates.
  82 */
  83#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
  84#define TCG_OVERSIZED_GUEST 1
  85#else
  86#define TCG_OVERSIZED_GUEST 0
  87#endif
  88
  89#if TCG_TARGET_NB_REGS <= 32
  90typedef uint32_t TCGRegSet;
  91#elif TCG_TARGET_NB_REGS <= 64
  92typedef uint64_t TCGRegSet;
  93#else
  94#error unsupported
  95#endif
  96
  97#if TCG_TARGET_REG_BITS == 32
  98/* Turn some undef macros into false macros.  */
  99#define TCG_TARGET_HAS_extrl_i64_i32    0
 100#define TCG_TARGET_HAS_extrh_i64_i32    0
 101#define TCG_TARGET_HAS_div_i64          0
 102#define TCG_TARGET_HAS_rem_i64          0
 103#define TCG_TARGET_HAS_div2_i64         0
 104#define TCG_TARGET_HAS_rot_i64          0
 105#define TCG_TARGET_HAS_ext8s_i64        0
 106#define TCG_TARGET_HAS_ext16s_i64       0
 107#define TCG_TARGET_HAS_ext32s_i64       0
 108#define TCG_TARGET_HAS_ext8u_i64        0
 109#define TCG_TARGET_HAS_ext16u_i64       0
 110#define TCG_TARGET_HAS_ext32u_i64       0
 111#define TCG_TARGET_HAS_bswap16_i64      0
 112#define TCG_TARGET_HAS_bswap32_i64      0
 113#define TCG_TARGET_HAS_bswap64_i64      0
 114#define TCG_TARGET_HAS_neg_i64          0
 115#define TCG_TARGET_HAS_not_i64          0
 116#define TCG_TARGET_HAS_andc_i64         0
 117#define TCG_TARGET_HAS_orc_i64          0
 118#define TCG_TARGET_HAS_eqv_i64          0
 119#define TCG_TARGET_HAS_nand_i64         0
 120#define TCG_TARGET_HAS_nor_i64          0
 121#define TCG_TARGET_HAS_clz_i64          0
 122#define TCG_TARGET_HAS_ctz_i64          0
 123#define TCG_TARGET_HAS_ctpop_i64        0
 124#define TCG_TARGET_HAS_deposit_i64      0
 125#define TCG_TARGET_HAS_extract_i64      0
 126#define TCG_TARGET_HAS_sextract_i64     0
 127#define TCG_TARGET_HAS_movcond_i64      0
 128#define TCG_TARGET_HAS_add2_i64         0
 129#define TCG_TARGET_HAS_sub2_i64         0
 130#define TCG_TARGET_HAS_mulu2_i64        0
 131#define TCG_TARGET_HAS_muls2_i64        0
 132#define TCG_TARGET_HAS_muluh_i64        0
 133#define TCG_TARGET_HAS_mulsh_i64        0
 134/* Turn some undef macros into true macros.  */
 135#define TCG_TARGET_HAS_add2_i32         1
 136#define TCG_TARGET_HAS_sub2_i32         1
 137#endif
 138
 139#ifndef TCG_TARGET_deposit_i32_valid
 140#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
 141#endif
 142#ifndef TCG_TARGET_deposit_i64_valid
 143#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
 144#endif
 145#ifndef TCG_TARGET_extract_i32_valid
 146#define TCG_TARGET_extract_i32_valid(ofs, len) 1
 147#endif
 148#ifndef TCG_TARGET_extract_i64_valid
 149#define TCG_TARGET_extract_i64_valid(ofs, len) 1
 150#endif
 151
 152/* Only one of DIV or DIV2 should be defined.  */
 153#if defined(TCG_TARGET_HAS_div_i32)
 154#define TCG_TARGET_HAS_div2_i32         0
 155#elif defined(TCG_TARGET_HAS_div2_i32)
 156#define TCG_TARGET_HAS_div_i32          0
 157#define TCG_TARGET_HAS_rem_i32          0
 158#endif
 159#if defined(TCG_TARGET_HAS_div_i64)
 160#define TCG_TARGET_HAS_div2_i64         0
 161#elif defined(TCG_TARGET_HAS_div2_i64)
 162#define TCG_TARGET_HAS_div_i64          0
 163#define TCG_TARGET_HAS_rem_i64          0
 164#endif
 165
 166/* For 32-bit targets, some sort of unsigned widening multiply is required.  */
 167#if TCG_TARGET_REG_BITS == 32 \
 168    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
 169         || defined(TCG_TARGET_HAS_muluh_i32))
 170# error "Missing unsigned widening multiply"
 171#endif
 172
 173#if !defined(TCG_TARGET_HAS_v64) \
 174    && !defined(TCG_TARGET_HAS_v128) \
 175    && !defined(TCG_TARGET_HAS_v256)
 176#define TCG_TARGET_MAYBE_vec            0
 177#define TCG_TARGET_HAS_neg_vec          0
 178#define TCG_TARGET_HAS_not_vec          0
 179#define TCG_TARGET_HAS_andc_vec         0
 180#define TCG_TARGET_HAS_orc_vec          0
 181#define TCG_TARGET_HAS_shi_vec          0
 182#define TCG_TARGET_HAS_shs_vec          0
 183#define TCG_TARGET_HAS_shv_vec          0
 184#define TCG_TARGET_HAS_mul_vec          0
 185#else
 186#define TCG_TARGET_MAYBE_vec            1
 187#endif
 188#ifndef TCG_TARGET_HAS_v64
 189#define TCG_TARGET_HAS_v64              0
 190#endif
 191#ifndef TCG_TARGET_HAS_v128
 192#define TCG_TARGET_HAS_v128             0
 193#endif
 194#ifndef TCG_TARGET_HAS_v256
 195#define TCG_TARGET_HAS_v256             0
 196#endif
 197
 198#ifndef TARGET_INSN_START_EXTRA_WORDS
 199# define TARGET_INSN_START_WORDS 1
 200#else
 201# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
 202#endif
 203
 204typedef enum TCGOpcode {
 205#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
 206#include "tcg-opc.h"
 207#undef DEF
 208    NB_OPS,
 209} TCGOpcode;
 210
 211#define tcg_regset_set_reg(d, r)   ((d) |= (TCGRegSet)1 << (r))
 212#define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
 213#define tcg_regset_test_reg(d, r)  (((d) >> (r)) & 1)
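     /* Illustrative sketch of the register-set macros above (TCG_REG_R0 is a
      * hypothetical placeholder; the real register names come from the
      * backend's tcg-target.h):
      *
      *     TCGRegSet set = 0;
      *     tcg_regset_set_reg(set, TCG_REG_R0);
      *     if (tcg_regset_test_reg(set, TCG_REG_R0)) {
      *         tcg_regset_reset_reg(set, TCG_REG_R0);
      *     }
      */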
 214
 215#ifndef TCG_TARGET_INSN_UNIT_SIZE
 216# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
 217#elif TCG_TARGET_INSN_UNIT_SIZE == 1
 218typedef uint8_t tcg_insn_unit;
 219#elif TCG_TARGET_INSN_UNIT_SIZE == 2
 220typedef uint16_t tcg_insn_unit;
 221#elif TCG_TARGET_INSN_UNIT_SIZE == 4
 222typedef uint32_t tcg_insn_unit;
 223#elif TCG_TARGET_INSN_UNIT_SIZE == 8
 224typedef uint64_t tcg_insn_unit;
 225#else
  226/* The port had better have provided its own tcg_insn_unit typedef.  */
 227#endif
 228
 229
 230#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
 231# define tcg_debug_assert(X) do { assert(X); } while (0)
 232#elif QEMU_GNUC_PREREQ(4, 5)
 233# define tcg_debug_assert(X) \
 234    do { if (!(X)) { __builtin_unreachable(); } } while (0)
 235#else
 236# define tcg_debug_assert(X) do { (void)(X); } while (0)
 237#endif
 238
 239typedef struct TCGRelocation {
 240    struct TCGRelocation *next;
 241    int type;
 242    tcg_insn_unit *ptr;
 243    intptr_t addend;
  244} TCGRelocation;
 245
 246typedef struct TCGLabel {
 247    unsigned has_value : 1;
 248    unsigned id : 31;
 249    union {
 250        uintptr_t value;
 251        tcg_insn_unit *value_ptr;
 252        TCGRelocation *first_reloc;
 253    } u;
 254} TCGLabel;
 255
 256typedef struct TCGPool {
 257    struct TCGPool *next;
 258    int size;
 259    uint8_t data[0] __attribute__ ((aligned));
 260} TCGPool;
 261
 262#define TCG_POOL_CHUNK_SIZE 32768
 263
 264#define TCG_MAX_TEMPS 512
 265#define TCG_MAX_INSNS 512
 266
 267/* when the size of the arguments of a called function is smaller than
 268   this value, they are statically allocated in the TB stack frame */
 269#define TCG_STATIC_CALL_ARGS_SIZE 128
 270
 271typedef enum TCGType {
 272    TCG_TYPE_I32,
 273    TCG_TYPE_I64,
 274
 275    TCG_TYPE_V64,
 276    TCG_TYPE_V128,
 277    TCG_TYPE_V256,
 278
 279    TCG_TYPE_COUNT, /* number of different types */
 280
 281    /* An alias for the size of the host register.  */
 282#if TCG_TARGET_REG_BITS == 32
 283    TCG_TYPE_REG = TCG_TYPE_I32,
 284#else
 285    TCG_TYPE_REG = TCG_TYPE_I64,
 286#endif
 287
 288    /* An alias for the size of the native pointer.  */
 289#if UINTPTR_MAX == UINT32_MAX
 290    TCG_TYPE_PTR = TCG_TYPE_I32,
 291#else
 292    TCG_TYPE_PTR = TCG_TYPE_I64,
 293#endif
 294
 295    /* An alias for the size of the target "long", aka register.  */
 296#if TARGET_LONG_BITS == 64
 297    TCG_TYPE_TL = TCG_TYPE_I64,
 298#else
 299    TCG_TYPE_TL = TCG_TYPE_I32,
 300#endif
 301} TCGType;
 302
 303/* Constants for qemu_ld and qemu_st for the Memory Operation field.  */
 304typedef enum TCGMemOp {
 305    MO_8     = 0,
 306    MO_16    = 1,
 307    MO_32    = 2,
 308    MO_64    = 3,
 309    MO_SIZE  = 3,   /* Mask for the above.  */
 310
 311    MO_SIGN  = 4,   /* Sign-extended, otherwise zero-extended.  */
 312
 313    MO_BSWAP = 8,   /* Host reverse endian.  */
 314#ifdef HOST_WORDS_BIGENDIAN
 315    MO_LE    = MO_BSWAP,
 316    MO_BE    = 0,
 317#else
 318    MO_LE    = 0,
 319    MO_BE    = MO_BSWAP,
 320#endif
 321#ifdef TARGET_WORDS_BIGENDIAN
 322    MO_TE    = MO_BE,
 323#else
 324    MO_TE    = MO_LE,
 325#endif
 326
 327    /* MO_UNALN accesses are never checked for alignment.
 328     * MO_ALIGN accesses will result in a call to the CPU's
 329     * do_unaligned_access hook if the guest address is not aligned.
 330     * The default depends on whether the target CPU defines ALIGNED_ONLY.
 331     *
  332     * Some architectures (e.g. ARMv8) require the address to be aligned
  333     * to a size larger than that of the memory access itself.
  334     * Others (e.g. SPARCv9) require an address which is aligned,
  335     * but less strictly than the natural alignment.
  336     *
  337     * MO_ALIGN assumes the alignment size is the size of the memory access.
  338     *
  339     * There are three options:
  340     * - unaligned access permitted (MO_UNALN);
  341     * - an alignment to the size of an access (MO_ALIGN);
  342     * - an alignment to a specified size, which may be more or less than
  343     *   the access size (MO_ALIGN_x, where 'x' is a size in bytes).
 344     */
 345    MO_ASHIFT = 4,
 346    MO_AMASK = 7 << MO_ASHIFT,
 347#ifdef ALIGNED_ONLY
 348    MO_ALIGN = 0,
 349    MO_UNALN = MO_AMASK,
 350#else
 351    MO_ALIGN = MO_AMASK,
 352    MO_UNALN = 0,
 353#endif
 354    MO_ALIGN_2  = 1 << MO_ASHIFT,
 355    MO_ALIGN_4  = 2 << MO_ASHIFT,
 356    MO_ALIGN_8  = 3 << MO_ASHIFT,
 357    MO_ALIGN_16 = 4 << MO_ASHIFT,
 358    MO_ALIGN_32 = 5 << MO_ASHIFT,
 359    MO_ALIGN_64 = 6 << MO_ASHIFT,
 360
 361    /* Combinations of the above, for ease of use.  */
 362    MO_UB    = MO_8,
 363    MO_UW    = MO_16,
 364    MO_UL    = MO_32,
 365    MO_SB    = MO_SIGN | MO_8,
 366    MO_SW    = MO_SIGN | MO_16,
 367    MO_SL    = MO_SIGN | MO_32,
 368    MO_Q     = MO_64,
 369
 370    MO_LEUW  = MO_LE | MO_UW,
 371    MO_LEUL  = MO_LE | MO_UL,
 372    MO_LESW  = MO_LE | MO_SW,
 373    MO_LESL  = MO_LE | MO_SL,
 374    MO_LEQ   = MO_LE | MO_Q,
 375
 376    MO_BEUW  = MO_BE | MO_UW,
 377    MO_BEUL  = MO_BE | MO_UL,
 378    MO_BESW  = MO_BE | MO_SW,
 379    MO_BESL  = MO_BE | MO_SL,
 380    MO_BEQ   = MO_BE | MO_Q,
 381
 382    MO_TEUW  = MO_TE | MO_UW,
 383    MO_TEUL  = MO_TE | MO_UL,
 384    MO_TESW  = MO_TE | MO_SW,
 385    MO_TESL  = MO_TE | MO_SL,
 386    MO_TEQ   = MO_TE | MO_Q,
 387
 388    MO_SSIZE = MO_SIZE | MO_SIGN,
 389} TCGMemOp;
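     /* Example, following directly from the encodings above: MO_LEUL is a
      * little-endian, zero-extended 32-bit access, i.e.
      *     MO_LEUL == MO_LE | MO_UL == MO_LE | MO_32
      * while (MO_LESW & MO_SIZE) == MO_16 and (MO_LESW & MO_SIGN) != 0.  */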
 390
 391/**
 392 * get_alignment_bits
 393 * @memop: TCGMemOp value
 394 *
  395 * Extract the alignment requirement from the memop, as the log2 of bytes.
 396 */
 397static inline unsigned get_alignment_bits(TCGMemOp memop)
 398{
 399    unsigned a = memop & MO_AMASK;
 400
 401    if (a == MO_UNALN) {
 402        /* No alignment required.  */
 403        a = 0;
 404    } else if (a == MO_ALIGN) {
 405        /* A natural alignment requirement.  */
 406        a = memop & MO_SIZE;
 407    } else {
 408        /* A specific alignment requirement.  */
 409        a = a >> MO_ASHIFT;
 410    }
 411#if defined(CONFIG_SOFTMMU)
 412    /* The requested alignment cannot overlap the TLB flags.  */
 413    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
 414#endif
 415    return a;
 416}
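     /* For example, get_alignment_bits(MO_ALIGN_8 | MO_32) is 3 (an 8-byte
      * alignment requirement), whereas get_alignment_bits(MO_ALIGN | MO_32)
      * is 2, the natural alignment of a 32-bit access.  */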
 417
 418typedef tcg_target_ulong TCGArg;
 419
 420/* Define type and accessor macros for TCG variables.
 421
 422   TCG variables are the inputs and outputs of TCG ops, as described
 423   in tcg/README. Target CPU front-end code uses these types to deal
 424   with TCG variables as it emits TCG code via the tcg_gen_* functions.
 425   They come in several flavours:
 426    * TCGv_i32 : 32 bit integer type
 427    * TCGv_i64 : 64 bit integer type
 428    * TCGv_ptr : a host pointer type
 429    * TCGv_vec : a host vector type; the exact size is not exposed
 430                 to the CPU front-end code.
 431    * TCGv : an integer type the same size as target_ulong
 432             (an alias for either TCGv_i32 or TCGv_i64)
 433   The compiler's type checking will complain if you mix them
 434   up and pass the wrong sized TCGv to a function.
 435
 436   Users of tcg_gen_* don't need to know about any of the internal
 437   details of these, and should treat them as opaque types.
 438   You won't be able to look inside them in a debugger either.
 439
 440   Internal implementation details follow:
 441
 442   Note that there is no definition of the structs TCGv_i32_d etc anywhere.
 443   This is deliberate, because the values we store in variables of type
 444   TCGv_i32 are not really pointers-to-structures. They're just small
 445   integers, but keeping them in pointer types like this means that the
 446   compiler will complain if you accidentally pass a TCGv_i32 to a
 447   function which takes a TCGv_i64, and so on. Only the internals of
 448   TCG need to care about the actual contents of the types.  */
 449
 450typedef struct TCGv_i32_d *TCGv_i32;
 451typedef struct TCGv_i64_d *TCGv_i64;
 452typedef struct TCGv_ptr_d *TCGv_ptr;
 453typedef struct TCGv_vec_d *TCGv_vec;
 454typedef TCGv_ptr TCGv_env;
 455#if TARGET_LONG_BITS == 32
 456#define TCGv TCGv_i32
 457#elif TARGET_LONG_BITS == 64
 458#define TCGv TCGv_i64
 459#else
 460#error Unhandled TARGET_LONG_BITS value
 461#endif
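     /* A minimal sketch of how a target front end uses these types; the
      * tcg_gen_* helpers are declared in tcg-op.h rather than here:
      *
      *     TCGv_i32 t = tcg_temp_new_i32();
      *     tcg_gen_movi_i32(t, 42);
      *     tcg_gen_addi_i32(t, t, 1);
      *     tcg_temp_free_i32(t);
      */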
 462
 463/* call flags */
 464/* Helper does not read globals (either directly or through an exception). It
 465   implies TCG_CALL_NO_WRITE_GLOBALS. */
 466#define TCG_CALL_NO_READ_GLOBALS    0x0010
 467/* Helper does not write globals */
 468#define TCG_CALL_NO_WRITE_GLOBALS   0x0020
 469/* Helper can be safely suppressed if the return value is not used. */
 470#define TCG_CALL_NO_SIDE_EFFECTS    0x0040
 471
 472/* convenience version of most used call flags */
 473#define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
 474#define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
 475#define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
 476#define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
 477#define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)
 478
 479/* Used to align parameters.  See the comment before tcgv_i32_temp.  */
 480#define TCG_CALL_DUMMY_ARG      ((TCGArg)0)
 481
 482/* Conditions.  Note that these are laid out for easy manipulation by
 483   the functions below:
 484     bit 0 is used for inverting;
 485     bit 1 is signed,
 486     bit 2 is unsigned,
 487     bit 3 is used with bit 0 for swapping signed/unsigned.  */
 488typedef enum {
 489    /* non-signed */
 490    TCG_COND_NEVER  = 0 | 0 | 0 | 0,
 491    TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
 492    TCG_COND_EQ     = 8 | 0 | 0 | 0,
 493    TCG_COND_NE     = 8 | 0 | 0 | 1,
 494    /* signed */
 495    TCG_COND_LT     = 0 | 0 | 2 | 0,
 496    TCG_COND_GE     = 0 | 0 | 2 | 1,
 497    TCG_COND_LE     = 8 | 0 | 2 | 0,
 498    TCG_COND_GT     = 8 | 0 | 2 | 1,
 499    /* unsigned */
 500    TCG_COND_LTU    = 0 | 4 | 0 | 0,
 501    TCG_COND_GEU    = 0 | 4 | 0 | 1,
 502    TCG_COND_LEU    = 8 | 4 | 0 | 0,
 503    TCG_COND_GTU    = 8 | 4 | 0 | 1,
 504} TCGCond;
 505
 506/* Invert the sense of the comparison.  */
 507static inline TCGCond tcg_invert_cond(TCGCond c)
 508{
 509    return (TCGCond)(c ^ 1);
 510}
 511
 512/* Swap the operands in a comparison.  */
 513static inline TCGCond tcg_swap_cond(TCGCond c)
 514{
 515    return c & 6 ? (TCGCond)(c ^ 9) : c;
 516}
 517
 518/* Create an "unsigned" version of a "signed" comparison.  */
 519static inline TCGCond tcg_unsigned_cond(TCGCond c)
 520{
 521    return c & 2 ? (TCGCond)(c ^ 6) : c;
 522}
 523
 524/* Create a "signed" version of an "unsigned" comparison.  */
 525static inline TCGCond tcg_signed_cond(TCGCond c)
 526{
 527    return c & 4 ? (TCGCond)(c ^ 6) : c;
 528}
 529
 530/* Must a comparison be considered unsigned?  */
 531static inline bool is_unsigned_cond(TCGCond c)
 532{
 533    return (c & 4) != 0;
 534}
 535
 536/* Create a "high" version of a double-word comparison.
 537   This removes equality from a LTE or GTE comparison.  */
 538static inline TCGCond tcg_high_cond(TCGCond c)
 539{
 540    switch (c) {
 541    case TCG_COND_GE:
 542    case TCG_COND_LE:
 543    case TCG_COND_GEU:
 544    case TCG_COND_LEU:
 545        return (TCGCond)(c ^ 8);
 546    default:
 547        return c;
 548    }
 549}
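     /* Worked examples of the bit manipulation above:
      *     tcg_invert_cond(TCG_COND_LT)   == TCG_COND_GE    (2 ^ 1 == 3)
      *     tcg_swap_cond(TCG_COND_LT)     == TCG_COND_GT    (2 ^ 9 == 11)
      *     tcg_unsigned_cond(TCG_COND_LT) == TCG_COND_LTU   (2 ^ 6 == 4)
      *     tcg_high_cond(TCG_COND_LE)     == TCG_COND_LT    (equality dropped)
      */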
 550
 551typedef enum TCGTempVal {
 552    TEMP_VAL_DEAD,
 553    TEMP_VAL_REG,
 554    TEMP_VAL_MEM,
 555    TEMP_VAL_CONST,
 556} TCGTempVal;
 557
 558typedef struct TCGTemp {
 559    TCGReg reg:8;
 560    TCGTempVal val_type:8;
 561    TCGType base_type:8;
 562    TCGType type:8;
 563    unsigned int fixed_reg:1;
 564    unsigned int indirect_reg:1;
 565    unsigned int indirect_base:1;
 566    unsigned int mem_coherent:1;
 567    unsigned int mem_allocated:1;
 568    /* If true, the temp is saved across both basic blocks and
 569       translation blocks.  */
 570    unsigned int temp_global:1;
 571    /* If true, the temp is saved across basic blocks but dead
 572       at the end of translation blocks.  If false, the temp is
 573       dead at the end of basic blocks.  */
 574    unsigned int temp_local:1;
 575    unsigned int temp_allocated:1;
 576
 577    tcg_target_long val;
 578    struct TCGTemp *mem_base;
 579    intptr_t mem_offset;
 580    const char *name;
 581
 582    /* Pass-specific information that can be stored for a temporary.
 583       One word worth of integer data, and one pointer to data
 584       allocated separately.  */
 585    uintptr_t state;
 586    void *state_ptr;
 587} TCGTemp;
 588
 589typedef struct TCGContext TCGContext;
 590
 591typedef struct TCGTempSet {
 592    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
 593} TCGTempSet;
 594
 595/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
  596   this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
 597   There are never more than 2 outputs, which means that we can store all
 598   dead + sync data within 16 bits.  */
 599#define DEAD_ARG  4
 600#define SYNC_ARG  1
 601typedef uint16_t TCGLifeData;
 602
 603/* The layout here is designed to avoid a bitfield crossing of
 604   a 32-bit boundary, which would cause GCC to add extra padding.  */
 605typedef struct TCGOp {
 606    TCGOpcode opc   : 8;        /*  8 */
 607
 608    /* Parameters for this opcode.  See below.  */
 609    unsigned param1 : 4;        /* 12 */
 610    unsigned param2 : 4;        /* 16 */
 611
 612    /* Lifetime data of the operands.  */
 613    unsigned life   : 16;       /* 32 */
 614
 615    /* Next and previous opcodes.  */
 616    QTAILQ_ENTRY(TCGOp) link;
 617
 618    /* Arguments for the opcode.  */
 619    TCGArg args[MAX_OPC_PARAM];
 620} TCGOp;
 621
 622#define TCGOP_CALLI(X)    (X)->param1
 623#define TCGOP_CALLO(X)    (X)->param2
 624
 625#define TCGOP_VECL(X)     (X)->param1
 626#define TCGOP_VECE(X)     (X)->param2
 627
 628/* Make sure operands fit in the bitfields above.  */
 629QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
 630
 631typedef struct TCGProfile {
 632    int64_t tb_count1;
 633    int64_t tb_count;
 634    int64_t op_count; /* total insn count */
 635    int op_count_max; /* max insn per TB */
 636    int64_t temp_count;
 637    int temp_count_max;
 638    int64_t del_op_count;
 639    int64_t code_in_len;
 640    int64_t code_out_len;
 641    int64_t search_out_len;
 642    int64_t interm_time;
 643    int64_t code_time;
 644    int64_t la_time;
 645    int64_t opt_time;
 646    int64_t restore_count;
 647    int64_t restore_time;
 648    int64_t table_op_count[NB_OPS];
 649} TCGProfile;
 650
 651struct TCGContext {
 652    uint8_t *pool_cur, *pool_end;
 653    TCGPool *pool_first, *pool_current, *pool_first_large;
 654    int nb_labels;
 655    int nb_globals;
 656    int nb_temps;
 657    int nb_indirects;
 658
 659    /* goto_tb support */
 660    tcg_insn_unit *code_buf;
 661    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
 662    uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
 663    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */
 664
 665    TCGRegSet reserved_regs;
 666    uint32_t tb_cflags; /* cflags of the current TB */
 667    intptr_t current_frame_offset;
 668    intptr_t frame_start;
 669    intptr_t frame_end;
 670    TCGTemp *frame_temp;
 671
 672    tcg_insn_unit *code_ptr;
 673
 674#ifdef CONFIG_PROFILER
 675    TCGProfile prof;
 676#endif
 677
 678#ifdef CONFIG_DEBUG_TCG
 679    int temps_in_use;
 680    int goto_tb_issue_mask;
 681#endif
 682
 683    /* Code generation.  Note that we specifically do not use tcg_insn_unit
 684       here, because there's too much arithmetic throughout that relies
 685       on addition and subtraction working on bytes.  Rely on the GCC
 686       extension that allows arithmetic on void*.  */
 687    void *code_gen_prologue;
 688    void *code_gen_epilogue;
 689    void *code_gen_buffer;
 690    size_t code_gen_buffer_size;
 691    void *code_gen_ptr;
 692    void *data_gen_ptr;
 693
 694    /* Threshold to flush the translated code buffer.  */
 695    void *code_gen_highwater;
 696
 697    /* Track which vCPU triggers events */
 698    CPUState *cpu;                      /* *_trans */
 699
 700    /* These structures are private to tcg-target.inc.c.  */
 701#ifdef TCG_TARGET_NEED_LDST_LABELS
 702    struct TCGLabelQemuLdst *ldst_labels;
 703#endif
 704#ifdef TCG_TARGET_NEED_POOL_LABELS
 705    struct TCGLabelPoolData *pool_labels;
 706#endif
 707
 708    TCGLabel *exitreq_label;
 709
 710    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
 711    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
 712
 713    QTAILQ_HEAD(TCGOpHead, TCGOp) ops, free_ops;
 714
 715    /* Tells which temporary holds a given register.
 716       It does not take into account fixed registers */
 717    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];
 718
 719    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
 720    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
 721};
 722
 723extern TCGContext tcg_init_ctx;
 724extern __thread TCGContext *tcg_ctx;
 725extern TCGv_env cpu_env;
 726
 727static inline size_t temp_idx(TCGTemp *ts)
 728{
 729    ptrdiff_t n = ts - tcg_ctx->temps;
 730    tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
 731    return n;
 732}
 733
 734static inline TCGArg temp_arg(TCGTemp *ts)
 735{
 736    return (uintptr_t)ts;
 737}
 738
 739static inline TCGTemp *arg_temp(TCGArg a)
 740{
 741    return (TCGTemp *)(uintptr_t)a;
 742}
 743
 744/* Using the offset of a temporary, relative to TCGContext, rather than
 745   its index means that we don't use 0.  That leaves offset 0 free for
 746   a NULL representation without having to leave index 0 unused.  */
 747static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
 748{
 749    uintptr_t o = (uintptr_t)v;
 750    TCGTemp *t = (void *)tcg_ctx + o;
 751    tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
 752    return t;
 753}
 754
 755static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
 756{
 757    return tcgv_i32_temp((TCGv_i32)v);
 758}
 759
 760static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
 761{
 762    return tcgv_i32_temp((TCGv_i32)v);
 763}
 764
 765static inline TCGTemp *tcgv_vec_temp(TCGv_vec v)
 766{
 767    return tcgv_i32_temp((TCGv_i32)v);
 768}
 769
 770static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
 771{
 772    return temp_arg(tcgv_i32_temp(v));
 773}
 774
 775static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
 776{
 777    return temp_arg(tcgv_i64_temp(v));
 778}
 779
 780static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
 781{
 782    return temp_arg(tcgv_ptr_temp(v));
 783}
 784
 785static inline TCGArg tcgv_vec_arg(TCGv_vec v)
 786{
 787    return temp_arg(tcgv_vec_temp(v));
 788}
 789
 790static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
 791{
 792    (void)temp_idx(t); /* trigger embedded assert */
 793    return (TCGv_i32)((void *)t - (void *)tcg_ctx);
 794}
 795
 796static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
 797{
 798    return (TCGv_i64)temp_tcgv_i32(t);
 799}
 800
 801static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
 802{
 803    return (TCGv_ptr)temp_tcgv_i32(t);
 804}
 805
 806static inline TCGv_vec temp_tcgv_vec(TCGTemp *t)
 807{
 808    return (TCGv_vec)temp_tcgv_i32(t);
 809}
 810
 811#if TCG_TARGET_REG_BITS == 32
 812static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
 813{
 814    return temp_tcgv_i32(tcgv_i64_temp(t));
 815}
 816
 817static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
 818{
 819    return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
 820}
 821#endif
 822
 823static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
 824{
 825    op->args[arg] = v;
 826}
 827
 828static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v)
 829{
 830#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
 831    tcg_set_insn_param(op, arg, v);
 832#else
 833    tcg_set_insn_param(op, arg * 2, v);
 834    tcg_set_insn_param(op, arg * 2 + 1, v >> 32);
 835#endif
 836}
 837
 838/* The last op that was emitted.  */
 839static inline TCGOp *tcg_last_op(void)
 840{
 841    return QTAILQ_LAST(&tcg_ctx->ops, TCGOpHead);
 842}
 843
 844/* Test for whether to terminate the TB for using too many opcodes.  */
 845static inline bool tcg_op_buf_full(void)
 846{
 847    return false;
 848}
 849
 850/* pool based memory allocation */
 851
 852/* user-mode: tb_lock must be held for tcg_malloc_internal. */
 853void *tcg_malloc_internal(TCGContext *s, int size);
 854void tcg_pool_reset(TCGContext *s);
 855TranslationBlock *tcg_tb_alloc(TCGContext *s);
 856
 857void tcg_region_init(void);
 858void tcg_region_reset_all(void);
 859
 860size_t tcg_code_size(void);
 861size_t tcg_code_capacity(void);
 862
 863/* user-mode: Called with tb_lock held.  */
 864static inline void *tcg_malloc(int size)
 865{
 866    TCGContext *s = tcg_ctx;
 867    uint8_t *ptr, *ptr_end;
 868
 869    /* ??? This is a weak placeholder for minimum malloc alignment.  */
 870    size = QEMU_ALIGN_UP(size, 8);
 871
 872    ptr = s->pool_cur;
 873    ptr_end = ptr + size;
 874    if (unlikely(ptr_end > s->pool_end)) {
 875        return tcg_malloc_internal(tcg_ctx, size);
 876    } else {
 877        s->pool_cur = ptr_end;
 878        return ptr;
 879    }
 880}
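     /* Illustrative use of the pool allocator (a sketch; the real callers
      * live in tcg.c).  Allocations are not freed individually; the whole
      * pool is recycled by tcg_pool_reset():
      *
      *     TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));
      *     r->type = 0;
      */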
 881
 882void tcg_context_init(TCGContext *s);
 883void tcg_register_thread(void);
 884void tcg_prologue_init(TCGContext *s);
 885void tcg_func_start(TCGContext *s);
 886
 887int tcg_gen_code(TCGContext *s, TranslationBlock *tb);
 888
 889void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
 890
 891TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
 892                                     intptr_t, const char *);
 893
 894TCGv_i32 tcg_temp_new_internal_i32(int temp_local);
 895TCGv_i64 tcg_temp_new_internal_i64(int temp_local);
 896TCGv_vec tcg_temp_new_vec(TCGType type);
 897TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);
 898
 899void tcg_temp_free_i32(TCGv_i32 arg);
 900void tcg_temp_free_i64(TCGv_i64 arg);
 901void tcg_temp_free_vec(TCGv_vec arg);
 902
 903static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
 904                                              const char *name)
 905{
 906    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
 907    return temp_tcgv_i32(t);
 908}
 909
 910static inline TCGv_i32 tcg_temp_new_i32(void)
 911{
 912    return tcg_temp_new_internal_i32(0);
 913}
 914
 915static inline TCGv_i32 tcg_temp_local_new_i32(void)
 916{
 917    return tcg_temp_new_internal_i32(1);
 918}
 919
 920static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
 921                                              const char *name)
 922{
 923    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
 924    return temp_tcgv_i64(t);
 925}
 926
 927static inline TCGv_i64 tcg_temp_new_i64(void)
 928{
 929    return tcg_temp_new_internal_i64(0);
 930}
 931
 932static inline TCGv_i64 tcg_temp_local_new_i64(void)
 933{
 934    return tcg_temp_new_internal_i64(1);
 935}
 936
 937#if defined(CONFIG_DEBUG_TCG)
 938/* If you call tcg_clear_temp_count() at the start of a section of
 939 * code which is not supposed to leak any TCG temporaries, then
 940 * calling tcg_check_temp_count() at the end of the section will
 941 * return 1 if the section did in fact leak a temporary.
 942 */
 943void tcg_clear_temp_count(void);
 944int tcg_check_temp_count(void);
 945#else
 946#define tcg_clear_temp_count() do { } while (0)
 947#define tcg_check_temp_count() 0
 948#endif
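     /* A sketch of the intended usage pattern in a target front end
      * (gen_one_insn is a hypothetical per-instruction translate step):
      *
      *     tcg_clear_temp_count();
      *     gen_one_insn(ctx);
      *     if (tcg_check_temp_count()) {
      *         // report a leaked temporary for this guest instruction
      *     }
      */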
 949
 950void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf);
 951void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf);
 952
 953#define TCG_CT_ALIAS  0x80
 954#define TCG_CT_IALIAS 0x40
 955#define TCG_CT_NEWREG 0x20 /* output requires a new register */
 956#define TCG_CT_REG    0x01
 957#define TCG_CT_CONST  0x02 /* any constant of register size */
 958
 959typedef struct TCGArgConstraint {
 960    uint16_t ct;
 961    uint8_t alias_index;
 962    union {
 963        TCGRegSet regs;
 964    } u;
 965} TCGArgConstraint;
 966
 967#define TCG_MAX_OP_ARGS 16
 968
 969/* Bits for TCGOpDef->flags, 8 bits available.  */
 970enum {
 971    /* Instruction defines the end of a basic block.  */
 972    TCG_OPF_BB_END       = 0x01,
  973    /* Instruction clobbers call registers and potentially updates globals.  */
 974    TCG_OPF_CALL_CLOBBER = 0x02,
 975    /* Instruction has side effects: it cannot be removed if its outputs
 976       are not used, and might trigger exceptions.  */
 977    TCG_OPF_SIDE_EFFECTS = 0x04,
 978    /* Instruction operands are 64-bits (otherwise 32-bits).  */
 979    TCG_OPF_64BIT        = 0x08,
 980    /* Instruction is optional and not implemented by the host, or insn
  981       is generic and should not be implemented by the host.  */
 982    TCG_OPF_NOT_PRESENT  = 0x10,
 983    /* Instruction operands are vectors.  */
 984    TCG_OPF_VECTOR       = 0x20,
 985};
 986
 987typedef struct TCGOpDef {
 988    const char *name;
 989    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
 990    uint8_t flags;
 991    TCGArgConstraint *args_ct;
 992    int *sorted_args;
 993#if defined(CONFIG_DEBUG_TCG)
 994    int used;
 995#endif
 996} TCGOpDef;
 997
 998extern TCGOpDef tcg_op_defs[];
 999extern const size_t tcg_op_defs_max;
1000
1001typedef struct TCGTargetOpDef {
1002    TCGOpcode op;
1003    const char *args_ct_str[TCG_MAX_OP_ARGS];
1004} TCGTargetOpDef;
1005
1006#define tcg_abort() \
1007do {\
1008    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
1009    abort();\
1010} while (0)
1011
1012#if UINTPTR_MAX == UINT32_MAX
1013static inline TCGv_ptr TCGV_NAT_TO_PTR(TCGv_i32 n) { return (TCGv_ptr)n; }
1014static inline TCGv_i32 TCGV_PTR_TO_NAT(TCGv_ptr n) { return (TCGv_i32)n; }
1015
1016#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((intptr_t)(V)))
1017#define tcg_global_mem_new_ptr(R, O, N) \
1018    TCGV_NAT_TO_PTR(tcg_global_mem_new_i32((R), (O), (N)))
1019#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i32())
1020#define tcg_temp_free_ptr(T) tcg_temp_free_i32(TCGV_PTR_TO_NAT(T))
1021#else
1022static inline TCGv_ptr TCGV_NAT_TO_PTR(TCGv_i64 n) { return (TCGv_ptr)n; }
1023static inline TCGv_i64 TCGV_PTR_TO_NAT(TCGv_ptr n) { return (TCGv_i64)n; }
1024
1025#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((intptr_t)(V)))
1026#define tcg_global_mem_new_ptr(R, O, N) \
1027    TCGV_NAT_TO_PTR(tcg_global_mem_new_i64((R), (O), (N)))
1028#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i64())
1029#define tcg_temp_free_ptr(T) tcg_temp_free_i64(TCGV_PTR_TO_NAT(T))
1030#endif
1031
1032bool tcg_op_supported(TCGOpcode op);
1033
1034void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);
1035
1036TCGOp *tcg_emit_op(TCGOpcode opc);
1037void tcg_op_remove(TCGContext *s, TCGOp *op);
1038TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
1039TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
1040
1041void tcg_optimize(TCGContext *s);
1042
1043/* only used for debugging purposes */
1044void tcg_dump_ops(TCGContext *s);
1045
1046TCGv_i32 tcg_const_i32(int32_t val);
1047TCGv_i64 tcg_const_i64(int64_t val);
1048TCGv_i32 tcg_const_local_i32(int32_t val);
1049TCGv_i64 tcg_const_local_i64(int64_t val);
1050TCGv_vec tcg_const_zeros_vec(TCGType);
1051TCGv_vec tcg_const_ones_vec(TCGType);
1052TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
1053TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);
1054
1055TCGLabel *gen_new_label(void);
1056
1057/**
1058 * label_arg
1059 * @l: label
1060 *
1061 * Encode a label for storage in the TCG opcode stream.
1062 */
1063
1064static inline TCGArg label_arg(TCGLabel *l)
1065{
1066    return (uintptr_t)l;
1067}
1068
1069/**
1070 * arg_label
1071 * @i: value
1072 *
1073 * The opposite of label_arg.  Retrieve a label from the
1074 * encoding of the TCG opcode stream.
1075 */
1076
1077static inline TCGLabel *arg_label(TCGArg i)
1078{
1079    return (TCGLabel *)(uintptr_t)i;
1080}
1081
1082/**
1083 * tcg_ptr_byte_diff
1084 * @a, @b: addresses to be differenced
1085 *
1086 * There are many places within the TCG backends where we need a byte
1087 * difference between two pointers.  While this can be accomplished
1088 * with local casting, it's easy to get wrong -- especially if one is
1089 * concerned with the signedness of the result.
1090 *
1091 * This version relies on GCC's void pointer arithmetic to get the
1092 * correct result.
1093 */
1094
1095static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
1096{
1097    return a - b;
1098}
1099
1100/**
1101 * tcg_pcrel_diff
1102 * @s: the tcg context
1103 * @target: address of the target
1104 *
1105 * Produce a pc-relative difference, from the current code_ptr
1106 * to the destination address.
1107 */
1108
1109static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
1110{
1111    return tcg_ptr_byte_diff(target, s->code_ptr);
1112}
1113
1114/**
1115 * tcg_current_code_size
1116 * @s: the tcg context
1117 *
1118 * Compute the current code size within the translation block.
1119 * This is used to fill in qemu's data structures for goto_tb.
1120 */
1121
1122static inline size_t tcg_current_code_size(TCGContext *s)
1123{
1124    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
1125}
1126
1127/* Combine the TCGMemOp and mmu_idx parameters into a single value.  */
1128typedef uint32_t TCGMemOpIdx;
1129
1130/**
1131 * make_memop_idx
1132 * @op: memory operation
1133 * @idx: mmu index
1134 *
1135 * Encode these values into a single parameter.
1136 */
1137static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
1138{
1139    tcg_debug_assert(idx <= 15);
1140    return (op << 4) | idx;
1141}
1142
1143/**
1144 * get_memop
1145 * @oi: combined op/idx parameter
1146 *
1147 * Extract the memory operation from the combined value.
1148 */
1149static inline TCGMemOp get_memop(TCGMemOpIdx oi)
1150{
1151    return oi >> 4;
1152}
1153
1154/**
1155 * get_mmuidx
1156 * @oi: combined op/idx parameter
1157 *
1158 * Extract the mmu index from the combined value.
1159 */
1160static inline unsigned get_mmuidx(TCGMemOpIdx oi)
1161{
1162    return oi & 15;
1163}
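     /* Example round trip through the encoding above:
      *
      *     TCGMemOpIdx oi = make_memop_idx(MO_TEUL, 1);
      *     assert(get_memop(oi) == MO_TEUL);
      *     assert(get_mmuidx(oi) == 1);
      */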
1164
1165/**
1166 * tcg_qemu_tb_exec:
1167 * @env: pointer to CPUArchState for the CPU
1168 * @tb_ptr: address of generated code for the TB to execute
1169 *
1170 * Start executing code from a given translation block.
1171 * Where translation blocks have been linked, execution
1172 * may proceed from the given TB into successive ones.
1173 * Control eventually returns only when some action is needed
1174 * from the top-level loop: either control must pass to a TB
1175 * which has not yet been directly linked, or an asynchronous
1176 * event such as an interrupt needs handling.
1177 *
1178 * Return: The return value is the value passed to the corresponding
1179 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
1180 * The value is either zero or a 4-byte aligned pointer to that TB combined
1181 * with additional information in its two least significant bits. The
1182 * additional information is encoded as follows:
1183 *  0, 1: the link between this TB and the next is via the specified
1184 *        TB index (0 or 1). That is, we left the TB via (the equivalent
1185 *        of) "goto_tb <index>". The main loop uses this to determine
1186 *        how to link the TB just executed to the next.
1187 *  2:    we are using instruction counting code generation, and we
1188 *        did not start executing this TB because the instruction counter
1189 *        would hit zero midway through it. In this case the pointer
1190 *        returned is the TB we were about to execute, and the caller must
1191 *        arrange to execute the remaining count of instructions.
1192 *  3:    we stopped because the CPU's exit_request flag was set
1193 *        (usually meaning that there is an interrupt that needs to be
1194 *        handled). The pointer returned is the TB we were about to execute
1195 *        when we noticed the pending exit request.
1196 *
1197 * If the bottom two bits indicate an exit-via-index then the CPU
1198 * state is correctly synchronised and ready for execution of the next
1199 * TB (and in particular the guest PC is the address to execute next).
1200 * Otherwise, we gave up on execution of this TB before it started, and
1201 * the caller must fix up the CPU state by calling the CPU's
1202 * synchronize_from_tb() method with the TB pointer we return (falling
 1203 * back to calling the CPU's set_pc method with tb->pc if no
1204 * synchronize_from_tb() method exists).
1205 *
1206 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
1207 * to this default (which just calls the prologue.code emitted by
1208 * tcg_target_qemu_prologue()).
1209 */
1210#define TB_EXIT_MASK 3
1211#define TB_EXIT_IDX0 0
1212#define TB_EXIT_IDX1 1
1213#define TB_EXIT_REQUESTED 3
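     /* A sketch of how a caller decodes the return value, assuming the scheme
      * documented above (compare cpu_tb_exec() in accel/tcg/cpu-exec.c):
      *
      *     uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
      *     TranslationBlock *last_tb = (void *)(ret & ~TB_EXIT_MASK);
      *     int tb_exit = ret & TB_EXIT_MASK;
      *     if (tb_exit > TB_EXIT_IDX1) {
      *         // gave up before or during this TB; synchronise CPU state
      *     }
      */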
1214
1215#ifdef HAVE_TCG_QEMU_TB_EXEC
1216uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
1217#else
1218# define tcg_qemu_tb_exec(env, tb_ptr) \
1219    ((uintptr_t (*)(void *, void *))tcg_ctx->code_gen_prologue)(env, tb_ptr)
1220#endif
1221
1222void tcg_register_jit(void *buf, size_t buf_size);
1223
1224#if TCG_TARGET_MAYBE_vec
1225/* Return zero if the tuple (opc, type, vece) is unsupportable;
1226   return > 0 if it is directly supportable;
1227   return < 0 if we must call tcg_expand_vec_op.  */
1228int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned);
1229#else
1230static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
1231{
1232    return 0;
1233}
1234#endif
1235
1236/* Expand the tuple (opc, type, vece) on the given arguments.  */
1237void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);
1238
 1239/* Replicate a constant C according to the log2 of the element size.  */
1240uint64_t dup_const(unsigned vece, uint64_t c);
1241
1242#define dup_const(VECE, C)                                         \
1243    (__builtin_constant_p(VECE)                                    \
1244     ? (  (VECE) == MO_8  ? 0x0101010101010101ull * (uint8_t)(C)   \
1245        : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C)  \
1246        : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C)  \
1247        : dup_const(VECE, C))                                      \
1248     : dup_const(VECE, C))
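     /* For example, dup_const(MO_16, 0xab) evaluates to 0x00ab00ab00ab00abull:
      * the 16-bit constant is replicated into each element of a 64-bit lane.  */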
1249
1250
1251/*
1252 * Memory helpers that will be used by TCG generated code.
1253 */
1254#ifdef CONFIG_SOFTMMU
1255/* Value zero-extended to tcg register size.  */
1256tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
1257                                     TCGMemOpIdx oi, uintptr_t retaddr);
1258tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1259                                    TCGMemOpIdx oi, uintptr_t retaddr);
1260tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1261                                    TCGMemOpIdx oi, uintptr_t retaddr);
1262uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
1263                           TCGMemOpIdx oi, uintptr_t retaddr);
1264tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1265                                    TCGMemOpIdx oi, uintptr_t retaddr);
1266tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1267                                    TCGMemOpIdx oi, uintptr_t retaddr);
1268uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
1269                           TCGMemOpIdx oi, uintptr_t retaddr);
1270
1271/* Value sign-extended to tcg register size.  */
1272tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1273                                     TCGMemOpIdx oi, uintptr_t retaddr);
1274tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1275                                    TCGMemOpIdx oi, uintptr_t retaddr);
1276tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1277                                    TCGMemOpIdx oi, uintptr_t retaddr);
1278tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1279                                    TCGMemOpIdx oi, uintptr_t retaddr);
1280tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1281                                    TCGMemOpIdx oi, uintptr_t retaddr);
1282
1283void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
1284                        TCGMemOpIdx oi, uintptr_t retaddr);
1285void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1286                       TCGMemOpIdx oi, uintptr_t retaddr);
1287void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1288                       TCGMemOpIdx oi, uintptr_t retaddr);
1289void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1290                       TCGMemOpIdx oi, uintptr_t retaddr);
1291void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1292                       TCGMemOpIdx oi, uintptr_t retaddr);
1293void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1294                       TCGMemOpIdx oi, uintptr_t retaddr);
1295void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1296                       TCGMemOpIdx oi, uintptr_t retaddr);
1297
1298uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
1299                            TCGMemOpIdx oi, uintptr_t retaddr);
1300uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
1301                            TCGMemOpIdx oi, uintptr_t retaddr);
1302uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
1303                            TCGMemOpIdx oi, uintptr_t retaddr);
1304uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
1305                            TCGMemOpIdx oi, uintptr_t retaddr);
1306uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
1307                            TCGMemOpIdx oi, uintptr_t retaddr);
1308uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
1309                            TCGMemOpIdx oi, uintptr_t retaddr);
1310uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
1311                            TCGMemOpIdx oi, uintptr_t retaddr);
1312
1313/* Temporary aliases until backends are converted.  */
1314#ifdef TARGET_WORDS_BIGENDIAN
1315# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
1316# define helper_ret_lduw_mmu  helper_be_lduw_mmu
1317# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
1318# define helper_ret_ldul_mmu  helper_be_ldul_mmu
1319# define helper_ret_ldl_mmu   helper_be_ldul_mmu
1320# define helper_ret_ldq_mmu   helper_be_ldq_mmu
1321# define helper_ret_stw_mmu   helper_be_stw_mmu
1322# define helper_ret_stl_mmu   helper_be_stl_mmu
1323# define helper_ret_stq_mmu   helper_be_stq_mmu
1324# define helper_ret_ldw_cmmu  helper_be_ldw_cmmu
1325# define helper_ret_ldl_cmmu  helper_be_ldl_cmmu
1326# define helper_ret_ldq_cmmu  helper_be_ldq_cmmu
1327#else
1328# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
1329# define helper_ret_lduw_mmu  helper_le_lduw_mmu
1330# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
1331# define helper_ret_ldul_mmu  helper_le_ldul_mmu
1332# define helper_ret_ldl_mmu   helper_le_ldul_mmu
1333# define helper_ret_ldq_mmu   helper_le_ldq_mmu
1334# define helper_ret_stw_mmu   helper_le_stw_mmu
1335# define helper_ret_stl_mmu   helper_le_stl_mmu
1336# define helper_ret_stq_mmu   helper_le_stq_mmu
1337# define helper_ret_ldw_cmmu  helper_le_ldw_cmmu
1338# define helper_ret_ldl_cmmu  helper_le_ldl_cmmu
1339# define helper_ret_ldq_cmmu  helper_le_ldq_cmmu
1340#endif
1341
1342uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
1343                                    uint32_t cmpv, uint32_t newv,
1344                                    TCGMemOpIdx oi, uintptr_t retaddr);
1345uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
1346                                       uint32_t cmpv, uint32_t newv,
1347                                       TCGMemOpIdx oi, uintptr_t retaddr);
1348uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
1349                                       uint32_t cmpv, uint32_t newv,
1350                                       TCGMemOpIdx oi, uintptr_t retaddr);
1351uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
1352                                       uint64_t cmpv, uint64_t newv,
1353                                       TCGMemOpIdx oi, uintptr_t retaddr);
1354uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
1355                                       uint32_t cmpv, uint32_t newv,
1356                                       TCGMemOpIdx oi, uintptr_t retaddr);
1357uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
1358                                       uint32_t cmpv, uint32_t newv,
1359                                       TCGMemOpIdx oi, uintptr_t retaddr);
1360uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
1361                                       uint64_t cmpv, uint64_t newv,
1362                                       TCGMemOpIdx oi, uintptr_t retaddr);
1363
1364#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
1365TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu         \
1366    (CPUArchState *env, target_ulong addr, TYPE val,  \
1367     TCGMemOpIdx oi, uintptr_t retaddr);
1368
1369#ifdef CONFIG_ATOMIC64
1370#define GEN_ATOMIC_HELPER_ALL(NAME)          \
1371    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
1372    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
1373    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
1374    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
1375    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
1376    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
1377    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
1378#else
1379#define GEN_ATOMIC_HELPER_ALL(NAME)          \
1380    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
1381    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
1382    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
1383    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
1384    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
1385#endif
1386
1387GEN_ATOMIC_HELPER_ALL(fetch_add)
1388GEN_ATOMIC_HELPER_ALL(fetch_sub)
1389GEN_ATOMIC_HELPER_ALL(fetch_and)
1390GEN_ATOMIC_HELPER_ALL(fetch_or)
1391GEN_ATOMIC_HELPER_ALL(fetch_xor)
1392
1393GEN_ATOMIC_HELPER_ALL(add_fetch)
1394GEN_ATOMIC_HELPER_ALL(sub_fetch)
1395GEN_ATOMIC_HELPER_ALL(and_fetch)
1396GEN_ATOMIC_HELPER_ALL(or_fetch)
1397GEN_ATOMIC_HELPER_ALL(xor_fetch)
1398
1399GEN_ATOMIC_HELPER_ALL(xchg)
1400
1401#undef GEN_ATOMIC_HELPER_ALL
1402#undef GEN_ATOMIC_HELPER
1403#endif /* CONFIG_SOFTMMU */
1404
1405#ifdef CONFIG_ATOMIC128
1406#include "qemu/int128.h"
1407
 1408/* These aren't really "proper" helpers because TCG cannot manage Int128.
 1409   However, they use the same format as the others, for use by the backends. */
1410Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
1411                                     Int128 cmpv, Int128 newv,
1412                                     TCGMemOpIdx oi, uintptr_t retaddr);
1413Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
1414                                     Int128 cmpv, Int128 newv,
1415                                     TCGMemOpIdx oi, uintptr_t retaddr);
1416
1417Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
1418                                TCGMemOpIdx oi, uintptr_t retaddr);
1419Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
1420                                TCGMemOpIdx oi, uintptr_t retaddr);
1421void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
1422                              TCGMemOpIdx oi, uintptr_t retaddr);
1423void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
1424                              TCGMemOpIdx oi, uintptr_t retaddr);
1425
1426#endif /* CONFIG_ATOMIC128 */
1427
1428#endif /* TCG_H */
1429