qemu/include/tcg/tcg.h
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef TCG_H
#define TCG_H

#include "cpu.h"
#include "exec/memop.h"
#include "exec/tb-context.h"
#include "qemu/bitops.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "tcg/tcg-mo.h"
#include "tcg-target.h"
#include "qemu/int128.h"

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 6
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))

#define CPU_TEMP_BUF_NLONGS 128

/* Default target word size to pointer size.  */
#ifndef TCG_TARGET_REG_BITS
# if UINTPTR_MAX == UINT32_MAX
#  define TCG_TARGET_REG_BITS 32
# elif UINTPTR_MAX == UINT64_MAX
#  define TCG_TARGET_REG_BITS 64
# else
#  error Unknown pointer size for tcg target
# endif
#endif

#if TCG_TARGET_REG_BITS == 32
typedef int32_t tcg_target_long;
typedef uint32_t tcg_target_ulong;
#define TCG_PRIlx PRIx32
#define TCG_PRIld PRId32
#elif TCG_TARGET_REG_BITS == 64
typedef int64_t tcg_target_long;
typedef uint64_t tcg_target_ulong;
#define TCG_PRIlx PRIx64
#define TCG_PRIld PRId64
#else
#error unsupported
#endif

/* Oversized TCG guests make things like MTTCG hard
 * as we can't use atomics for cputlb updates.
 */
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
#define TCG_OVERSIZED_GUEST 1
#else
#define TCG_OVERSIZED_GUEST 0
#endif

#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
typedef uint64_t TCGRegSet;
#else
#error unsupported
#endif

#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros.  */
#define TCG_TARGET_HAS_extrl_i64_i32    0
#define TCG_TARGET_HAS_extrh_i64_i32    0
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#define TCG_TARGET_HAS_div2_i64         0
#define TCG_TARGET_HAS_rot_i64          0
#define TCG_TARGET_HAS_ext8s_i64        0
#define TCG_TARGET_HAS_ext16s_i64       0
#define TCG_TARGET_HAS_ext32s_i64       0
#define TCG_TARGET_HAS_ext8u_i64        0
#define TCG_TARGET_HAS_ext16u_i64       0
#define TCG_TARGET_HAS_ext32u_i64       0
#define TCG_TARGET_HAS_bswap16_i64      0
#define TCG_TARGET_HAS_bswap32_i64      0
#define TCG_TARGET_HAS_bswap64_i64      0
#define TCG_TARGET_HAS_neg_i64          0
#define TCG_TARGET_HAS_not_i64          0
#define TCG_TARGET_HAS_andc_i64         0
#define TCG_TARGET_HAS_orc_i64          0
#define TCG_TARGET_HAS_eqv_i64          0
#define TCG_TARGET_HAS_nand_i64         0
#define TCG_TARGET_HAS_nor_i64          0
#define TCG_TARGET_HAS_clz_i64          0
#define TCG_TARGET_HAS_ctz_i64          0
#define TCG_TARGET_HAS_ctpop_i64        0
#define TCG_TARGET_HAS_deposit_i64      0
#define TCG_TARGET_HAS_extract_i64      0
#define TCG_TARGET_HAS_sextract_i64     0
#define TCG_TARGET_HAS_extract2_i64     0
#define TCG_TARGET_HAS_movcond_i64      0
#define TCG_TARGET_HAS_add2_i64         0
#define TCG_TARGET_HAS_sub2_i64         0
#define TCG_TARGET_HAS_mulu2_i64        0
#define TCG_TARGET_HAS_muls2_i64        0
#define TCG_TARGET_HAS_muluh_i64        0
#define TCG_TARGET_HAS_mulsh_i64        0
/* Turn some undef macros into true macros.  */
#define TCG_TARGET_HAS_add2_i32         1
#define TCG_TARGET_HAS_sub2_i32         1
#endif

#ifndef TCG_TARGET_deposit_i32_valid
#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_deposit_i64_valid
#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i32_valid
#define TCG_TARGET_extract_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i64_valid
#define TCG_TARGET_extract_i64_valid(ofs, len) 1
#endif

/* Only one of DIV or DIV2 should be defined.  */
#if defined(TCG_TARGET_HAS_div_i32)
#define TCG_TARGET_HAS_div2_i32         0
#elif defined(TCG_TARGET_HAS_div2_i32)
#define TCG_TARGET_HAS_div_i32          0
#define TCG_TARGET_HAS_rem_i32          0
#endif
#if defined(TCG_TARGET_HAS_div_i64)
#define TCG_TARGET_HAS_div2_i64         0
#elif defined(TCG_TARGET_HAS_div2_i64)
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#endif

/* For 32-bit targets, some sort of unsigned widening multiply is required.  */
#if TCG_TARGET_REG_BITS == 32 \
    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
         || defined(TCG_TARGET_HAS_muluh_i32))
# error "Missing unsigned widening multiply"
#endif

#if !defined(TCG_TARGET_HAS_v64) \
    && !defined(TCG_TARGET_HAS_v128) \
    && !defined(TCG_TARGET_HAS_v256)
#define TCG_TARGET_MAYBE_vec            0
#define TCG_TARGET_HAS_abs_vec          0
#define TCG_TARGET_HAS_neg_vec          0
#define TCG_TARGET_HAS_not_vec          0
#define TCG_TARGET_HAS_andc_vec         0
#define TCG_TARGET_HAS_orc_vec          0
#define TCG_TARGET_HAS_roti_vec         0
#define TCG_TARGET_HAS_rots_vec         0
#define TCG_TARGET_HAS_rotv_vec         0
#define TCG_TARGET_HAS_shi_vec          0
#define TCG_TARGET_HAS_shs_vec          0
#define TCG_TARGET_HAS_shv_vec          0
#define TCG_TARGET_HAS_mul_vec          0
#define TCG_TARGET_HAS_sat_vec          0
#define TCG_TARGET_HAS_minmax_vec       0
#define TCG_TARGET_HAS_bitsel_vec       0
#define TCG_TARGET_HAS_cmpsel_vec       0
#else
#define TCG_TARGET_MAYBE_vec            1
#endif
#ifndef TCG_TARGET_HAS_v64
#define TCG_TARGET_HAS_v64              0
#endif
#ifndef TCG_TARGET_HAS_v128
#define TCG_TARGET_HAS_v128             0
#endif
#ifndef TCG_TARGET_HAS_v256
#define TCG_TARGET_HAS_v256             0
#endif

#ifndef TARGET_INSN_START_EXTRA_WORDS
# define TARGET_INSN_START_WORDS 1
#else
# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
#endif

typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg/tcg-opc.h"
#undef DEF
    NB_OPS,
} TCGOpcode;

#define tcg_regset_set_reg(d, r)   ((d) |= (TCGRegSet)1 << (r))
#define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
#define tcg_regset_test_reg(d, r)  (((d) >> (r)) & 1)
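
/*
 * Worked example of the regset macros above (illustrative only; real
 * register numbers are target-specific TCGReg values):
 *
 *   TCGRegSet set = 0;
 *   tcg_regset_set_reg(set, 3);          ... bit 3 now set
 *   tcg_regset_test_reg(set, 3)  == 1
 *   tcg_regset_reset_reg(set, 3);        ... set is empty again
 */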

#ifndef TCG_TARGET_INSN_UNIT_SIZE
# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
#elif TCG_TARGET_INSN_UNIT_SIZE == 1
typedef uint8_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 2
typedef uint16_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 4
typedef uint32_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 8
typedef uint64_t tcg_insn_unit;
#else
/* The port better have done this.  */
#endif


#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
# define tcg_debug_assert(X) do { assert(X); } while (0)
#else
# define tcg_debug_assert(X) \
    do { if (!(X)) { __builtin_unreachable(); } } while (0)
#endif

typedef struct TCGRelocation TCGRelocation;
struct TCGRelocation {
    QSIMPLEQ_ENTRY(TCGRelocation) next;
    tcg_insn_unit *ptr;
    intptr_t addend;
    int type;
};

typedef struct TCGLabel TCGLabel;
struct TCGLabel {
    unsigned present : 1;
    unsigned has_value : 1;
    unsigned id : 14;
    unsigned refs : 16;
    union {
        uintptr_t value;
        const tcg_insn_unit *value_ptr;
    } u;
    QSIMPLEQ_HEAD(, TCGRelocation) relocs;
    QSIMPLEQ_ENTRY(TCGLabel) next;
};

typedef struct TCGPool {
    struct TCGPool *next;
    int size;
    uint8_t data[] __attribute__ ((aligned));
} TCGPool;

#define TCG_POOL_CHUNK_SIZE 32768

#define TCG_MAX_TEMPS 512
#define TCG_MAX_INSNS 512

/* when the size of the arguments of a called function is smaller than
   this value, they are statically allocated in the TB stack frame */
#define TCG_STATIC_CALL_ARGS_SIZE 128

typedef enum TCGType {
    TCG_TYPE_I32,
    TCG_TYPE_I64,

    TCG_TYPE_V64,
    TCG_TYPE_V128,
    TCG_TYPE_V256,

    TCG_TYPE_COUNT, /* number of different types */

    /* An alias for the size of the host register.  */
#if TCG_TARGET_REG_BITS == 32
    TCG_TYPE_REG = TCG_TYPE_I32,
#else
    TCG_TYPE_REG = TCG_TYPE_I64,
#endif

    /* An alias for the size of the native pointer.  */
#if UINTPTR_MAX == UINT32_MAX
    TCG_TYPE_PTR = TCG_TYPE_I32,
#else
    TCG_TYPE_PTR = TCG_TYPE_I64,
#endif

    /* An alias for the size of the target "long", aka register.  */
#if TARGET_LONG_BITS == 64
    TCG_TYPE_TL = TCG_TYPE_I64,
#else
    TCG_TYPE_TL = TCG_TYPE_I32,
#endif
} TCGType;

/**
 * get_alignment_bits
 * @memop: MemOp value
 *
 * Extract the alignment size from the memop.
 */
static inline unsigned get_alignment_bits(MemOp memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        /* No alignment required.  */
        a = 0;
    } else if (a == MO_ALIGN) {
        /* A natural alignment requirement.  */
        a = memop & MO_SIZE;
    } else {
        /* A specific alignment requirement.  */
        a = a >> MO_ASHIFT;
    }
#if defined(CONFIG_SOFTMMU)
    /* The requested alignment cannot overlap the TLB flags.  */
    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
#endif
    return a;
}
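
/*
 * Examples, assuming the MemOp encodings from "exec/memop.h":
 *   get_alignment_bits(MO_32 | MO_ALIGN)   == 2   (natural 4-byte alignment)
 *   get_alignment_bits(MO_16 | MO_ALIGN_8) == 3   (explicit 8-byte alignment)
 *   get_alignment_bits(MO_64 | MO_UNALN)   == 0   (no alignment required)
 */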

typedef tcg_target_ulong TCGArg;

/* Define type and accessor macros for TCG variables.

   TCG variables are the inputs and outputs of TCG ops, as described
   in tcg/README. Target CPU front-end code uses these types to deal
   with TCG variables as it emits TCG code via the tcg_gen_* functions.
   They come in several flavours:
    * TCGv_i32 : 32 bit integer type
    * TCGv_i64 : 64 bit integer type
    * TCGv_ptr : a host pointer type
    * TCGv_vec : a host vector type; the exact size is not exposed
                 to the CPU front-end code.
    * TCGv : an integer type the same size as target_ulong
             (an alias for either TCGv_i32 or TCGv_i64)
   The compiler's type checking will complain if you mix them
   up and pass the wrong sized TCGv to a function.

   Users of tcg_gen_* don't need to know about any of the internal
   details of these, and should treat them as opaque types.
   You won't be able to look inside them in a debugger either.

   Internal implementation details follow:

   Note that there is no definition of the structs TCGv_i32_d etc anywhere.
   This is deliberate, because the values we store in variables of type
   TCGv_i32 are not really pointers-to-structures. They're just small
   integers, but keeping them in pointer types like this means that the
   compiler will complain if you accidentally pass a TCGv_i32 to a
   function which takes a TCGv_i64, and so on. Only the internals of
   TCG need to care about the actual contents of the types.  */

typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
typedef struct TCGv_ptr_d *TCGv_ptr;
typedef struct TCGv_vec_d *TCGv_vec;
typedef TCGv_ptr TCGv_env;
#if TARGET_LONG_BITS == 32
#define TCGv TCGv_i32
#elif TARGET_LONG_BITS == 64
#define TCGv TCGv_i64
#else
#error Unhandled TARGET_LONG_BITS value
#endif
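
/*
 * Sketch of the type safety described above; gen_widen is a hypothetical
 * front-end function and the tcg_gen_* calls come from "tcg/tcg-op.h":
 */
#if 0
static void gen_widen(TCGv_i64 dst, TCGv_i32 src)
{
    tcg_gen_ext_i32_i64(dst, src);  /* OK: prototype takes (i64, i32) */
    tcg_gen_mov_i64(dst, src);      /* compile error: TCGv_i32 is not TCGv_i64 */
}
#endif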

/* call flags */
/* Helper does not read globals (either directly or through an exception). It
   implies TCG_CALL_NO_WRITE_GLOBALS. */
#define TCG_CALL_NO_READ_GLOBALS    0x0001
/* Helper does not write globals */
#define TCG_CALL_NO_WRITE_GLOBALS   0x0002
/* Helper can be safely suppressed if the return value is not used. */
#define TCG_CALL_NO_SIDE_EFFECTS    0x0004
/* Helper is QEMU_NORETURN.  */
#define TCG_CALL_NO_RETURN          0x0008

/* convenience version of most used call flags */
#define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
#define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
#define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
#define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
#define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)
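
/*
 * Usage sketch: a helper that computes a pure function of its arguments
 * can be declared with NO_RWG_SE, letting TCG delete the call when the
 * result is unused.  DEF_HELPER_FLAGS_2 is from "exec/helper-head.h";
 * the helper name here is hypothetical:
 *
 *   DEF_HELPER_FLAGS_2(my_ctpop, TCG_CALL_NO_RWG_SE, i32, i32, i32)
 */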

/* Used to align parameters.  See the comment before tcgv_i32_temp.  */
#define TCG_CALL_DUMMY_ARG      ((TCGArg)0)

/* Conditions.  Note that these are laid out for easy manipulation by
   the functions below:
     bit 0 is used for inverting;
     bit 1 is signed,
     bit 2 is unsigned,
     bit 3 is used with bit 0 for swapping signed/unsigned.  */
typedef enum {
    /* non-signed */
    TCG_COND_NEVER  = 0 | 0 | 0 | 0,
    TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
    TCG_COND_EQ     = 8 | 0 | 0 | 0,
    TCG_COND_NE     = 8 | 0 | 0 | 1,
    /* signed */
    TCG_COND_LT     = 0 | 0 | 2 | 0,
    TCG_COND_GE     = 0 | 0 | 2 | 1,
    TCG_COND_LE     = 8 | 0 | 2 | 0,
    TCG_COND_GT     = 8 | 0 | 2 | 1,
    /* unsigned */
    TCG_COND_LTU    = 0 | 4 | 0 | 0,
    TCG_COND_GEU    = 0 | 4 | 0 | 1,
    TCG_COND_LEU    = 8 | 4 | 0 | 0,
    TCG_COND_GTU    = 8 | 4 | 0 | 1,
} TCGCond;

/* Invert the sense of the comparison.  */
static inline TCGCond tcg_invert_cond(TCGCond c)
{
    return (TCGCond)(c ^ 1);
}

/* Swap the operands in a comparison.  */
static inline TCGCond tcg_swap_cond(TCGCond c)
{
    return c & 6 ? (TCGCond)(c ^ 9) : c;
}

/* Create an "unsigned" version of a "signed" comparison.  */
static inline TCGCond tcg_unsigned_cond(TCGCond c)
{
    return c & 2 ? (TCGCond)(c ^ 6) : c;
}

/* Create a "signed" version of an "unsigned" comparison.  */
static inline TCGCond tcg_signed_cond(TCGCond c)
{
    return c & 4 ? (TCGCond)(c ^ 6) : c;
}

/* Must a comparison be considered unsigned?  */
static inline bool is_unsigned_cond(TCGCond c)
{
    return (c & 4) != 0;
}

/* Create a "high" version of a double-word comparison.
   This removes equality from a LTE or GTE comparison.  */
static inline TCGCond tcg_high_cond(TCGCond c)
{
    switch (c) {
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
        return (TCGCond)(c ^ 8);
    default:
        return c;
    }
}
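
/*
 * Worked examples of the condition algebra above (the values follow
 * directly from the bit layout, not from any lookup table):
 *   tcg_invert_cond(TCG_COND_EQ)   == TCG_COND_NE    (8 ^ 1 == 9)
 *   tcg_swap_cond(TCG_COND_LT)     == TCG_COND_GT    (2 ^ 9 == 11)
 *   tcg_unsigned_cond(TCG_COND_LT) == TCG_COND_LTU   (2 ^ 6 == 4)
 *   tcg_high_cond(TCG_COND_LEU)    == TCG_COND_LTU   (12 ^ 8 == 4)
 */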

typedef enum TCGTempVal {
    TEMP_VAL_DEAD,
    TEMP_VAL_REG,
    TEMP_VAL_MEM,
    TEMP_VAL_CONST,
} TCGTempVal;

typedef enum TCGTempKind {
    /* Temp is dead at the end of all basic blocks. */
    TEMP_NORMAL,
    /* Temp is saved across basic blocks but dead at the end of TBs. */
    TEMP_LOCAL,
    /* Temp is saved across both basic blocks and translation blocks. */
    TEMP_GLOBAL,
    /* Temp is in a fixed register. */
    TEMP_FIXED,
    /* Temp is a fixed constant. */
    TEMP_CONST,
} TCGTempKind;

typedef struct TCGTemp {
    TCGReg reg:8;
    TCGTempVal val_type:8;
    TCGType base_type:8;
    TCGType type:8;
    TCGTempKind kind:3;
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
    unsigned int mem_coherent:1;
    unsigned int mem_allocated:1;
    unsigned int temp_allocated:1;

    int64_t val;
    struct TCGTemp *mem_base;
    intptr_t mem_offset;
    const char *name;

    /* Pass-specific information that can be stored for a temporary.
       One word worth of integer data, and one pointer to data
       allocated separately.  */
    uintptr_t state;
    void *state_ptr;
} TCGTemp;

typedef struct TCGContext TCGContext;

typedef struct TCGTempSet {
    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;

/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
   this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
   There are never more than 2 outputs, which means that we can store all
   dead + sync data within 16 bits.  */
#define DEAD_ARG  4
#define SYNC_ARG  1
typedef uint16_t TCGLifeData;

/* The layout here is designed to avoid a bitfield crossing of
   a 32-bit boundary, which would cause GCC to add extra padding.  */
typedef struct TCGOp {
    TCGOpcode opc   : 8;        /*  8 */

    /* Parameters for this opcode.  See below.  */
    unsigned param1 : 4;        /* 12 */
    unsigned param2 : 4;        /* 16 */

    /* Lifetime data of the operands.  */
    unsigned life   : 16;       /* 32 */

    /* Next and previous opcodes.  */
    QTAILQ_ENTRY(TCGOp) link;
#ifdef CONFIG_PLUGIN
    QSIMPLEQ_ENTRY(TCGOp) plugin_link;
#endif

    /* Arguments for the opcode.  */
    TCGArg args[MAX_OPC_PARAM];

    /* Register preferences for the output(s).  */
    TCGRegSet output_pref[2];
} TCGOp;

#define TCGOP_CALLI(X)    (X)->param1
#define TCGOP_CALLO(X)    (X)->param2

#define TCGOP_VECL(X)     (X)->param1
#define TCGOP_VECE(X)     (X)->param2

/* Make sure operands fit in the bitfields above.  */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));

typedef struct TCGProfile {
    int64_t cpu_exec_time;
    int64_t tb_count1;
    int64_t tb_count;
    int64_t op_count; /* total insn count */
    int op_count_max; /* max insn per TB */
    int temp_count_max;
    int64_t temp_count;
    int64_t del_op_count;
    int64_t code_in_len;
    int64_t code_out_len;
    int64_t search_out_len;
    int64_t interm_time;
    int64_t code_time;
    int64_t la_time;
    int64_t opt_time;
    int64_t restore_count;
    int64_t restore_time;
    int64_t table_op_count[NB_OPS];
} TCGProfile;

struct TCGContext {
    uint8_t *pool_cur, *pool_end;
    TCGPool *pool_first, *pool_current, *pool_first_large;
    int nb_labels;
    int nb_globals;
    int nb_temps;
    int nb_indirects;
    int nb_ops;

    /* goto_tb support */
    tcg_insn_unit *code_buf;
    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
    uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */

    TCGRegSet reserved_regs;
    uint32_t tb_cflags; /* cflags of the current TB */
    intptr_t current_frame_offset;
    intptr_t frame_start;
    intptr_t frame_end;
    TCGTemp *frame_temp;

    tcg_insn_unit *code_ptr;

#ifdef CONFIG_PROFILER
    TCGProfile prof;
#endif

#ifdef CONFIG_DEBUG_TCG
    int temps_in_use;
    int goto_tb_issue_mask;
    const TCGOpcode *vecop_list;
#endif

    /* Code generation.  Note that we specifically do not use tcg_insn_unit
       here, because there's too much arithmetic throughout that relies
       on addition and subtraction working on bytes.  Rely on the GCC
       extension that allows arithmetic on void*.  */
    void *code_gen_buffer;
    size_t code_gen_buffer_size;
    void *code_gen_ptr;
    void *data_gen_ptr;

    /* Threshold to flush the translated code buffer.  */
    void *code_gen_highwater;

    size_t tb_phys_invalidate_count;

    /* Track which vCPU triggers events */
    CPUState *cpu;                      /* *_trans */

    /* These structures are private to tcg-target.c.inc.  */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels;
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    struct TCGLabelPoolData *pool_labels;
#endif

    TCGLabel *exitreq_label;

#ifdef CONFIG_PLUGIN
    /*
     * We keep one plugin_tb struct per TCGContext. Note that on every TB
     * translation we clear but do not free its contents; this way we
     * avoid a lot of malloc/free churn, since after a few TB's it's
     * unlikely that we'll need to allocate either more instructions or more
     * space for instructions (for variable-instruction-length ISAs).
     */
    struct qemu_plugin_tb *plugin_tb;

    /* descriptor of the instruction being translated */
    struct qemu_plugin_insn *plugin_insn;

    /* list to quickly access the injected ops */
    QSIMPLEQ_HEAD(, TCGOp) plugin_ops;
#endif

    GHashTable *const_table[TCG_TYPE_COUNT];
    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

    QTAILQ_HEAD(, TCGOp) ops, free_ops;
    QSIMPLEQ_HEAD(, TCGLabel) labels;

    /* Tells which temporary holds a given register.
       It does not take into account fixed registers */
    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];

    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];

    /* Exit to translator on overflow. */
    sigjmp_buf jmp_trans;
};

static inline bool temp_readonly(TCGTemp *ts)
{
    return ts->kind >= TEMP_FIXED;
}

extern TCGContext tcg_init_ctx;
extern __thread TCGContext *tcg_ctx;
extern const void *tcg_code_gen_epilogue;
extern uintptr_t tcg_splitwx_diff;
extern TCGv_env cpu_env;

static inline bool in_code_gen_buffer(const void *p)
{
    const TCGContext *s = &tcg_init_ctx;
    /*
     * Much like it is valid to have a pointer to the byte past the
     * end of an array (so long as you don't dereference it), allow
     * a pointer to the byte past the end of the code gen buffer.
     */
    return (size_t)(p - s->code_gen_buffer) <= s->code_gen_buffer_size;
}

#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw);
void *tcg_splitwx_to_rw(const void *rx);
#else
static inline const void *tcg_splitwx_to_rx(void *rw)
{
    return rw ? rw + tcg_splitwx_diff : NULL;
}

static inline void *tcg_splitwx_to_rw(const void *rx)
{
    return rx ? (void *)rx - tcg_splitwx_diff : NULL;
}
#endif

static inline size_t temp_idx(TCGTemp *ts)
{
    ptrdiff_t n = ts - tcg_ctx->temps;
    tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
    return n;
}

static inline TCGArg temp_arg(TCGTemp *ts)
{
    return (uintptr_t)ts;
}

static inline TCGTemp *arg_temp(TCGArg a)
{
    return (TCGTemp *)(uintptr_t)a;
}

/* Using the offset of a temporary, relative to TCGContext, rather than
   its index means that we don't use 0.  That leaves offset 0 free for
   a NULL representation without having to leave index 0 unused.  */
static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
{
    uintptr_t o = (uintptr_t)v;
    TCGTemp *t = (void *)tcg_ctx + o;
    tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
    return t;
}

static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGTemp *tcgv_vec_temp(TCGv_vec v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
{
    return temp_arg(tcgv_i32_temp(v));
}

static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
{
    return temp_arg(tcgv_i64_temp(v));
}

static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
{
    return temp_arg(tcgv_ptr_temp(v));
}

static inline TCGArg tcgv_vec_arg(TCGv_vec v)
{
    return temp_arg(tcgv_vec_temp(v));
}

static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
{
    (void)temp_idx(t); /* trigger embedded assert */
    return (TCGv_i32)((void *)t - (void *)tcg_ctx);
}

static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
{
    return (TCGv_i64)temp_tcgv_i32(t);
}

static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
{
    return (TCGv_ptr)temp_tcgv_i32(t);
}

static inline TCGv_vec temp_tcgv_vec(TCGTemp *t)
{
    return (TCGv_vec)temp_tcgv_i32(t);
}
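
/*
 * The conversions above are exact inverses; a debug-only sketch of the
 * round trip (check_roundtrip is hypothetical):
 */
#if 0
static void check_roundtrip(TCGv_i32 v)
{
    TCGTemp *ts = tcgv_i32_temp(v);           /* offset -> TCGTemp pointer */
    tcg_debug_assert(temp_tcgv_i32(ts) == v); /* pointer -> offset again */
    tcg_debug_assert(arg_temp(temp_arg(ts)) == ts);
}
#endif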

#if TCG_TARGET_REG_BITS == 32
static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t));
}

static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
}
#endif

static inline TCGArg tcg_get_insn_param(TCGOp *op, int arg)
{
    return op->args[arg];
}

static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
{
    op->args[arg] = v;
}

static inline target_ulong tcg_get_insn_start_param(TCGOp *op, int arg)
{
#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    return tcg_get_insn_param(op, arg);
#else
    return tcg_get_insn_param(op, arg * 2) |
           ((uint64_t)tcg_get_insn_param(op, arg * 2 + 1) << 32);
#endif
}

static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v)
{
#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    tcg_set_insn_param(op, arg, v);
#else
    tcg_set_insn_param(op, arg * 2, v);
    tcg_set_insn_param(op, arg * 2 + 1, v >> 32);
#endif
}

/* The last op that was emitted.  */
static inline TCGOp *tcg_last_op(void)
{
    return QTAILQ_LAST(&tcg_ctx->ops);
}

/* Test for whether to terminate the TB for using too many opcodes.  */
static inline bool tcg_op_buf_full(void)
{
    /* This is not a hard limit, it merely stops translation when
     * we have produced "enough" opcodes.  We want to limit TB size
     * such that a RISC host can reasonably use a 16-bit signed
     * branch within the TB.  We also need to be mindful of the
     * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[]
     * and TCGContext.gen_insn_end_off[].
     */
    return tcg_ctx->nb_ops >= 4000;
}

/* pool based memory allocation */

/* user-mode: mmap_lock must be held for tcg_malloc_internal. */
void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
TranslationBlock *tcg_tb_alloc(TCGContext *s);

void tcg_region_init(void);
void tb_destroy(TranslationBlock *tb);
void tcg_region_reset_all(void);

size_t tcg_code_size(void);
size_t tcg_code_capacity(void);

void tcg_tb_insert(TranslationBlock *tb);
void tcg_tb_remove(TranslationBlock *tb);
size_t tcg_tb_phys_invalidate_count(void);
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
size_t tcg_nb_tbs(void);

/* user-mode: Called with mmap_lock held.  */
static inline void *tcg_malloc(int size)
{
    TCGContext *s = tcg_ctx;
    uint8_t *ptr, *ptr_end;

    /* ??? This is a weak placeholder for minimum malloc alignment.  */
    size = QEMU_ALIGN_UP(size, 8);

    ptr = s->pool_cur;
    ptr_end = ptr + size;
    if (unlikely(ptr_end > s->pool_end)) {
        return tcg_malloc_internal(tcg_ctx, size);
    } else {
        s->pool_cur = ptr_end;
        return ptr;
    }
}
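
/*
 * Usage sketch (hypothetical caller): pool memory lives until
 * tcg_pool_reset(), so it is never freed individually:
 */
#if 0
static uint16_t *alloc_scratch(int n)
{
    uint16_t *buf = tcg_malloc(n * sizeof(uint16_t));
    /* ... valid while translating the current TB; no free needed ... */
    return buf;
}
#endif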

void tcg_context_init(TCGContext *s);
void tcg_register_thread(void);
void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);

int tcg_gen_code(TCGContext *s, TranslationBlock *tb);

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);

TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
                                     intptr_t, const char *);
TCGTemp *tcg_temp_new_internal(TCGType, bool);
void tcg_temp_free_internal(TCGTemp *);
TCGv_vec tcg_temp_new_vec(TCGType type);
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);

static inline void tcg_temp_free_i32(TCGv_i32 arg)
{
    tcg_temp_free_internal(tcgv_i32_temp(arg));
}

static inline void tcg_temp_free_i64(TCGv_i64 arg)
{
    tcg_temp_free_internal(tcgv_i64_temp(arg));
}

static inline void tcg_temp_free_ptr(TCGv_ptr arg)
{
    tcg_temp_free_internal(tcgv_ptr_temp(arg));
}

static inline void tcg_temp_free_vec(TCGv_vec arg)
{
    tcg_temp_free_internal(tcgv_vec_temp(arg));
}

static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
    return temp_tcgv_i32(t);
}

static inline TCGv_i32 tcg_temp_new_i32(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, false);
    return temp_tcgv_i32(t);
}

static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, true);
    return temp_tcgv_i32(t);
}

static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
    return temp_tcgv_i64(t);
}

static inline TCGv_i64 tcg_temp_new_i64(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, false);
    return temp_tcgv_i64(t);
}

static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, true);
    return temp_tcgv_i64(t);
}

static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_PTR, reg, offset, name);
    return temp_tcgv_ptr(t);
}

static inline TCGv_ptr tcg_temp_new_ptr(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, false);
    return temp_tcgv_ptr(t);
}

static inline TCGv_ptr tcg_temp_local_new_ptr(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, true);
    return temp_tcgv_ptr(t);
}

#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of
 * code which is not supposed to leak any TCG temporaries, then
 * calling tcg_check_temp_count() at the end of the section will
 * return 1 if the section did in fact leak a temporary.
 */
void tcg_clear_temp_count(void);
int tcg_check_temp_count(void);
#else
#define tcg_clear_temp_count() do { } while (0)
#define tcg_check_temp_count() 0
#endif
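
/*
 * Lifecycle sketch tying the allocators and the leak check together
 * (gen_example is hypothetical; tcg_gen_* come from "tcg/tcg-op.h"):
 */
#if 0
static void gen_example(TCGv_i32 dst, TCGv_i32 src)
{
    tcg_clear_temp_count();
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_add_i32(tmp, src, src);
    tcg_gen_mov_i32(dst, tmp);
    tcg_temp_free_i32(tmp);   /* omitting this would trip the check below */
    tcg_debug_assert(!tcg_check_temp_count());
}
#endif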

int64_t tcg_cpu_exec_time(void);
void tcg_dump_info(void);
void tcg_dump_op_count(void);

#define TCG_CT_CONST  1 /* any constant of register size */

typedef struct TCGArgConstraint {
    unsigned ct : 16;
    unsigned alias_index : 4;
    unsigned sort_index : 4;
    bool oalias : 1;
    bool ialias : 1;
    bool newreg : 1;
    TCGRegSet regs;
} TCGArgConstraint;

#define TCG_MAX_OP_ARGS 16

/* Bits for TCGOpDef->flags, 8 bits available, all used.  */
enum {
    /* Instruction exits the translation block.  */
    TCG_OPF_BB_EXIT      = 0x01,
    /* Instruction defines the end of a basic block.  */
    TCG_OPF_BB_END       = 0x02,
    /* Instruction clobbers call registers and potentially updates globals.  */
    TCG_OPF_CALL_CLOBBER = 0x04,
    /* Instruction has side effects: it cannot be removed if its outputs
       are not used, and might trigger exceptions.  */
    TCG_OPF_SIDE_EFFECTS = 0x08,
    /* Instruction operands are 64-bits (otherwise 32-bits).  */
    TCG_OPF_64BIT        = 0x10,
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemented by the host.  */
    TCG_OPF_NOT_PRESENT  = 0x20,
    /* Instruction operands are vectors.  */
    TCG_OPF_VECTOR       = 0x40,
    /* Instruction is a conditional branch. */
    TCG_OPF_COND_BRANCH  = 0x80
};

typedef struct TCGOpDef {
    const char *name;
    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
    uint8_t flags;
    TCGArgConstraint *args_ct;
} TCGOpDef;

extern TCGOpDef tcg_op_defs[];
extern const size_t tcg_op_defs_max;

typedef struct TCGTargetOpDef {
    TCGOpcode op;
    const char *args_ct_str[TCG_MAX_OP_ARGS];
} TCGTargetOpDef;

#define tcg_abort() \
do {\
    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
    abort();\
} while (0)

bool tcg_op_supported(TCGOpcode op);

void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);

TCGOp *tcg_emit_op(TCGOpcode opc);
void tcg_op_remove(TCGContext *s, TCGOp *op);
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc);
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc);

void tcg_optimize(TCGContext *s);

/* Allocate a new temporary and initialize it with a constant. */
TCGv_i32 tcg_const_i32(int32_t val);
TCGv_i64 tcg_const_i64(int64_t val);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);
TCGv_vec tcg_const_zeros_vec(TCGType);
TCGv_vec tcg_const_ones_vec(TCGType);
TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);

/*
 * Locate or create a read-only temporary that is a constant.
 * This kind of temporary need not and should not be freed.
 */
TCGTemp *tcg_constant_internal(TCGType type, int64_t val);

static inline TCGv_i32 tcg_constant_i32(int32_t val)
{
    return temp_tcgv_i32(tcg_constant_internal(TCG_TYPE_I32, val));
}

static inline TCGv_i64 tcg_constant_i64(int64_t val)
{
    return temp_tcgv_i64(tcg_constant_internal(TCG_TYPE_I64, val));
}

TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val);
TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val);
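
/*
 * Sketch of the difference from tcg_const_*: a tcg_constant_* temp is
 * interned and read-only, so it must not be written or freed, while a
 * tcg_const_* temp is an ordinary mutable temp (illustrative only):
 *
 *   TCGv_i32 a = tcg_constant_i32(42);  ... never freed
 *   TCGv_i32 b = tcg_const_i32(42);     ... needs tcg_temp_free_i32(b)
 */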

#if UINTPTR_MAX == UINT32_MAX
# define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
# define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x)))
#else
# define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i64((intptr_t)(x)))
# define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i64((intptr_t)(x)))
#endif

TCGLabel *gen_new_label(void);

/**
 * label_arg
 * @l: label
 *
 * Encode a label for storage in the TCG opcode stream.
 */

static inline TCGArg label_arg(TCGLabel *l)
{
    return (uintptr_t)l;
}

/**
 * arg_label
 * @i: value
 *
 * The opposite of label_arg.  Retrieve a label from the
 * encoding of the TCG opcode stream.
 */

static inline TCGLabel *arg_label(TCGArg i)
{
    return (TCGLabel *)(uintptr_t)i;
}
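
/*
 * Usage sketch: label_arg/arg_label are exact inverses, and labels pair
 * with the branch generators from "tcg/tcg-op.h" (gen_skip_if_zero is
 * hypothetical):
 */
#if 0
static void gen_skip_if_zero(TCGv_i32 v)
{
    TCGLabel *l = gen_new_label();
    tcg_debug_assert(arg_label(label_arg(l)) == l);
    tcg_gen_brcondi_i32(TCG_COND_EQ, v, 0, l);
    /* ... ops skipped when v == 0 ... */
    gen_set_label(l);
}
#endif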

/**
 * tcg_ptr_byte_diff
 * @a, @b: addresses to be differenced
 *
 * There are many places within the TCG backends where we need a byte
 * difference between two pointers.  While this can be accomplished
 * with local casting, it's easy to get wrong -- especially if one is
 * concerned with the signedness of the result.
 *
 * This version relies on GCC's void pointer arithmetic to get the
 * correct result.
 */

static inline ptrdiff_t tcg_ptr_byte_diff(const void *a, const void *b)
{
    return a - b;
}

/**
 * tcg_pcrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a pc-relative difference, from the current code_ptr
 * to the destination address.
 */

static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, const void *target)
{
    return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_ptr));
}

/**
 * tcg_tbrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a difference, from the beginning of the current TB code
 * to the destination address.
 */
static inline ptrdiff_t tcg_tbrel_diff(TCGContext *s, const void *target)
{
    return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_buf));
}

/**
 * tcg_current_code_size
 * @s: the tcg context
 *
 * Compute the current code size within the translation block.
 * This is used to fill in qemu's data structures for goto_tb.
 */

static inline size_t tcg_current_code_size(TCGContext *s)
{
    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}

/* Combine the MemOp and mmu_idx parameters into a single value.  */
typedef uint32_t TCGMemOpIdx;

/**
 * make_memop_idx
 * @op: memory operation
 * @idx: mmu index
 *
 * Encode these values into a single parameter.
 */
static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx)
{
    tcg_debug_assert(idx <= 15);
    return (op << 4) | idx;
}

/**
 * get_memop
 * @oi: combined op/idx parameter
 *
 * Extract the memory operation from the combined value.
 */
static inline MemOp get_memop(TCGMemOpIdx oi)
{
    return oi >> 4;
}

/**
 * get_mmuidx
 * @oi: combined op/idx parameter
 *
 * Extract the mmu index from the combined value.
 */
static inline unsigned get_mmuidx(TCGMemOpIdx oi)
{
    return oi & 15;
}
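
/*
 * Round-trip example (MO_LEUL is defined in "exec/memop.h"):
 *   TCGMemOpIdx oi = make_memop_idx(MO_LEUL, 3);
 *   get_memop(oi)  == MO_LEUL
 *   get_mmuidx(oi) == 3
 */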

/**
 * tcg_qemu_tb_exec:
 * @env: pointer to CPUArchState for the CPU
 * @tb_ptr: address of generated code for the TB to execute
 *
 * Start executing code from a given translation block.
 * Where translation blocks have been linked, execution
 * may proceed from the given TB into successive ones.
 * Control eventually returns only when some action is needed
 * from the top-level loop: either control must pass to a TB
 * which has not yet been directly linked, or an asynchronous
 * event such as an interrupt needs handling.
 *
 * Return: The return value is the value passed to the corresponding
 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
 * The value is either zero or a 4-byte aligned pointer to that TB combined
 * with additional information in its two least significant bits. The
 * additional information is encoded as follows:
 *  0, 1: the link between this TB and the next is via the specified
 *        TB index (0 or 1). That is, we left the TB via (the equivalent
 *        of) "goto_tb <index>". The main loop uses this to determine
 *        how to link the TB just executed to the next.
 *  2:    we are using instruction counting code generation, and we
 *        did not start executing this TB because the instruction counter
 *        would hit zero midway through it. In this case the pointer
 *        returned is the TB we were about to execute, and the caller must
 *        arrange to execute the remaining count of instructions.
 *  3:    we stopped because the CPU's exit_request flag was set
 *        (usually meaning that there is an interrupt that needs to be
 *        handled). The pointer returned is the TB we were about to execute
 *        when we noticed the pending exit request.
 *
 * If the bottom two bits indicate an exit-via-index then the CPU
 * state is correctly synchronised and ready for execution of the next
 * TB (and in particular the guest PC is the address to execute next).
 * Otherwise, we gave up on execution of this TB before it started, and
 * the caller must fix up the CPU state by calling the CPU's
 * synchronize_from_tb() method with the TB pointer we return (falling
 * back to calling the CPU's set_pc method with tb->pc if no
 * synchronize_from_tb() method exists).
 *
 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
 * to this default (which just calls the prologue.code emitted by
 * tcg_target_qemu_prologue()).
 */
#define TB_EXIT_MASK      3
#define TB_EXIT_IDX0      0
#define TB_EXIT_IDX1      1
#define TB_EXIT_IDXMAX    1
#define TB_EXIT_REQUESTED 3

#ifdef CONFIG_TCG_INTERPRETER
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, const void *tb_ptr);
#else
typedef uintptr_t tcg_prologue_fn(CPUArchState *env, const void *tb_ptr);
extern tcg_prologue_fn *tcg_qemu_tb_exec;
#endif
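
/*
 * Sketch of how a main loop decodes the return value, mirroring the
 * description above (variable names are illustrative):
 */
#if 0
uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
TranslationBlock *last_tb = (void *)(ret & ~TB_EXIT_MASK);
int tb_exit = ret & TB_EXIT_MASK;
if (tb_exit > TB_EXIT_IDXMAX) {
    /* icount expiry or exit request: CPU state must be fixed up */
}
#endif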

void tcg_register_jit(const void *buf, size_t buf_size);

#if TCG_TARGET_MAYBE_vec
/* Return zero if the tuple (opc, type, vece) is unsupportable;
   return > 0 if it is directly supportable;
   return < 0 if we must call tcg_expand_vec_op.  */
int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned);
#else
static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
{
    return 0;
}
#endif

/* Expand the tuple (opc, type, vece) on the given arguments.  */
void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);

/* Replicate a constant C according to the log2 of the element size.  */
uint64_t dup_const(unsigned vece, uint64_t c);

#define dup_const(VECE, C)                                         \
    (__builtin_constant_p(VECE)                                    \
     ? (  (VECE) == MO_8  ? 0x0101010101010101ull * (uint8_t)(C)   \
        : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C)  \
        : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C)  \
        : (VECE) == MO_64 ? (uint64_t)(C)                          \
        : (qemu_build_not_reached_always(), 0))                    \
     : dup_const(VECE, C))

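/*
 * Examples of the expansion above (computed at compile time when VECE
 * is constant):
 *   dup_const(MO_8,  0xab)       == 0xababababababababull
 *   dup_const(MO_16, 0x1234)     == 0x1234123412341234ull
 *   dup_const(MO_32, 0xdeadbeef) == 0xdeadbeefdeadbeefull
 */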

/*
 * Memory helpers that will be used by TCG generated code.
 */
#ifdef CONFIG_SOFTMMU
/* Value zero-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);

/* Value sign-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);

/* Temporary aliases until backends are converted.  */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
# define helper_ret_lduw_mmu  helper_be_lduw_mmu
# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
# define helper_ret_ldul_mmu  helper_be_ldul_mmu
# define helper_ret_ldl_mmu   helper_be_ldul_mmu
# define helper_ret_ldq_mmu   helper_be_ldq_mmu
# define helper_ret_stw_mmu   helper_be_stw_mmu
# define helper_ret_stl_mmu   helper_be_stl_mmu
# define helper_ret_stq_mmu   helper_be_stq_mmu
#else
# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
# define helper_ret_lduw_mmu  helper_le_lduw_mmu
# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
# define helper_ret_ldul_mmu  helper_le_ldul_mmu
# define helper_ret_ldl_mmu   helper_le_ldul_mmu
# define helper_ret_ldq_mmu   helper_le_ldq_mmu
# define helper_ret_stw_mmu   helper_le_stw_mmu
# define helper_ret_stl_mmu   helper_le_stl_mmu
# define helper_ret_stq_mmu   helper_le_stq_mmu
#endif

uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);

#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu         \
    (CPUArchState *env, target_ulong addr, TYPE val,  \
     TCGMemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif

GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
GEN_ATOMIC_HELPER_ALL(fetch_smin)
GEN_ATOMIC_HELPER_ALL(fetch_umin)
GEN_ATOMIC_HELPER_ALL(fetch_smax)
GEN_ATOMIC_HELPER_ALL(fetch_umax)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
GEN_ATOMIC_HELPER_ALL(smin_fetch)
GEN_ATOMIC_HELPER_ALL(umin_fetch)
GEN_ATOMIC_HELPER_ALL(smax_fetch)
GEN_ATOMIC_HELPER_ALL(umax_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER
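
/*
 * For reference, GEN_ATOMIC_HELPER_ALL(fetch_add) above declares, among
 * others, the 32-bit little-endian variant:
 *
 *   uint32_t helper_atomic_fetch_addl_le_mmu(CPUArchState *env,
 *                                            target_ulong addr, uint32_t val,
 *                                            TCGMemOpIdx oi, uintptr_t retaddr);
 */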
#endif /* CONFIG_SOFTMMU */

/*
 * These aren't really "proper" helpers because TCG cannot manage Int128.
 * However, use the same format as the others, for use by the backends.
 *
 * The cmpxchg functions are only defined if HAVE_CMPXCHG128;
 * the ld/st functions are only defined if HAVE_ATOMIC128,
 * as defined by <qemu/atomic128.h>.
 */
Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);

Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_DEBUG_TCG
void tcg_assert_listed_vecop(TCGOpcode);
#else
static inline void tcg_assert_listed_vecop(TCGOpcode op) { }
#endif

static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n)
{
#ifdef CONFIG_DEBUG_TCG
    const TCGOpcode *o = tcg_ctx->vecop_list;
    tcg_ctx->vecop_list = n;
    return o;
#else
    return NULL;
#endif
}

bool tcg_can_emit_vecop_list(const TCGOpcode *, TCGType, unsigned);

#endif /* TCG_H */