qemu/tcg/tcg.h
   1/*
   2 * Tiny Code Generator for QEMU
   3 *
   4 * Copyright (c) 2008 Fabrice Bellard
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a copy
   7 * of this software and associated documentation files (the "Software"), to deal
   8 * in the Software without restriction, including without limitation the rights
   9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10 * copies of the Software, and to permit persons to whom the Software is
  11 * furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22 * THE SOFTWARE.
  23 */
  24
  25#ifndef TCG_H
  26#define TCG_H
  27
  28#include "qemu-common.h"
  29#include "cpu.h"
  30#include "exec/tb-context.h"
  31#include "qemu/bitops.h"
  32#include "qemu/queue.h"
  33#include "tcg-mo.h"
  34#include "tcg-target.h"
  35#include "qemu/int128.h"
  36
  37/* XXX: make safe guess about sizes */
  38#define MAX_OP_PER_INSTR 266
  39
  40#if HOST_LONG_BITS == 32
  41#define MAX_OPC_PARAM_PER_ARG 2
  42#else
  43#define MAX_OPC_PARAM_PER_ARG 1
  44#endif
  45#define MAX_OPC_PARAM_IARGS 6
  46#define MAX_OPC_PARAM_OARGS 1
  47#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
  48
  49/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
  50 * and up to 4 + N parameters on 64-bit archs
  51 * (N = number of input arguments + output arguments).  */
  52#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
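/*
 * Worked example of the bound above: with MAX_OPC_PARAM_IARGS = 6 and
 * MAX_OPC_PARAM_OARGS = 1, a 64-bit host (one slot per argument) gets
 * 4 + 1 * 7 = 11 parameters, while a 32-bit host (two slots per argument)
 * gets 4 + 2 * 7 = 18.
 */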
  53
  54#define CPU_TEMP_BUF_NLONGS 128
  55
  56/* Default target word size to pointer size.  */
  57#ifndef TCG_TARGET_REG_BITS
  58# if UINTPTR_MAX == UINT32_MAX
  59#  define TCG_TARGET_REG_BITS 32
  60# elif UINTPTR_MAX == UINT64_MAX
  61#  define TCG_TARGET_REG_BITS 64
  62# else
  63#  error Unknown pointer size for tcg target
  64# endif
  65#endif
  66
  67#if TCG_TARGET_REG_BITS == 32
  68typedef int32_t tcg_target_long;
  69typedef uint32_t tcg_target_ulong;
  70#define TCG_PRIlx PRIx32
  71#define TCG_PRIld PRId32
  72#elif TCG_TARGET_REG_BITS == 64
  73typedef int64_t tcg_target_long;
  74typedef uint64_t tcg_target_ulong;
  75#define TCG_PRIlx PRIx64
  76#define TCG_PRIld PRId64
  77#else
  78#error unsupported
  79#endif
  80
  81/* Oversized TCG guests make things like MTTCG hard
  82 * as we can't use atomics for cputlb updates.
  83 */
  84#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
  85#define TCG_OVERSIZED_GUEST 1
  86#else
  87#define TCG_OVERSIZED_GUEST 0
  88#endif
  89
  90#if TCG_TARGET_NB_REGS <= 32
  91typedef uint32_t TCGRegSet;
  92#elif TCG_TARGET_NB_REGS <= 64
  93typedef uint64_t TCGRegSet;
  94#else
  95#error unsupported
  96#endif
  97
  98#if TCG_TARGET_REG_BITS == 32
  99/* Turn some undef macros into false macros.  */
 100#define TCG_TARGET_HAS_extrl_i64_i32    0
 101#define TCG_TARGET_HAS_extrh_i64_i32    0
 102#define TCG_TARGET_HAS_div_i64          0
 103#define TCG_TARGET_HAS_rem_i64          0
 104#define TCG_TARGET_HAS_div2_i64         0
 105#define TCG_TARGET_HAS_rot_i64          0
 106#define TCG_TARGET_HAS_ext8s_i64        0
 107#define TCG_TARGET_HAS_ext16s_i64       0
 108#define TCG_TARGET_HAS_ext32s_i64       0
 109#define TCG_TARGET_HAS_ext8u_i64        0
 110#define TCG_TARGET_HAS_ext16u_i64       0
 111#define TCG_TARGET_HAS_ext32u_i64       0
 112#define TCG_TARGET_HAS_bswap16_i64      0
 113#define TCG_TARGET_HAS_bswap32_i64      0
 114#define TCG_TARGET_HAS_bswap64_i64      0
 115#define TCG_TARGET_HAS_neg_i64          0
 116#define TCG_TARGET_HAS_not_i64          0
 117#define TCG_TARGET_HAS_andc_i64         0
 118#define TCG_TARGET_HAS_orc_i64          0
 119#define TCG_TARGET_HAS_eqv_i64          0
 120#define TCG_TARGET_HAS_nand_i64         0
 121#define TCG_TARGET_HAS_nor_i64          0
 122#define TCG_TARGET_HAS_clz_i64          0
 123#define TCG_TARGET_HAS_ctz_i64          0
 124#define TCG_TARGET_HAS_ctpop_i64        0
 125#define TCG_TARGET_HAS_deposit_i64      0
 126#define TCG_TARGET_HAS_extract_i64      0
 127#define TCG_TARGET_HAS_sextract_i64     0
 128#define TCG_TARGET_HAS_movcond_i64      0
 129#define TCG_TARGET_HAS_add2_i64         0
 130#define TCG_TARGET_HAS_sub2_i64         0
 131#define TCG_TARGET_HAS_mulu2_i64        0
 132#define TCG_TARGET_HAS_muls2_i64        0
 133#define TCG_TARGET_HAS_muluh_i64        0
 134#define TCG_TARGET_HAS_mulsh_i64        0
 135/* Turn some undef macros into true macros.  */
 136#define TCG_TARGET_HAS_add2_i32         1
 137#define TCG_TARGET_HAS_sub2_i32         1
 138#endif
 139
 140#ifndef TCG_TARGET_deposit_i32_valid
 141#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
 142#endif
 143#ifndef TCG_TARGET_deposit_i64_valid
 144#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
 145#endif
 146#ifndef TCG_TARGET_extract_i32_valid
 147#define TCG_TARGET_extract_i32_valid(ofs, len) 1
 148#endif
 149#ifndef TCG_TARGET_extract_i64_valid
 150#define TCG_TARGET_extract_i64_valid(ofs, len) 1
 151#endif
 152
 153/* Only one of DIV or DIV2 should be defined.  */
 154#if defined(TCG_TARGET_HAS_div_i32)
 155#define TCG_TARGET_HAS_div2_i32         0
 156#elif defined(TCG_TARGET_HAS_div2_i32)
 157#define TCG_TARGET_HAS_div_i32          0
 158#define TCG_TARGET_HAS_rem_i32          0
 159#endif
 160#if defined(TCG_TARGET_HAS_div_i64)
 161#define TCG_TARGET_HAS_div2_i64         0
 162#elif defined(TCG_TARGET_HAS_div2_i64)
 163#define TCG_TARGET_HAS_div_i64          0
 164#define TCG_TARGET_HAS_rem_i64          0
 165#endif
 166
 167/* For 32-bit targets, some sort of unsigned widening multiply is required.  */
 168#if TCG_TARGET_REG_BITS == 32 \
 169    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
 170         || defined(TCG_TARGET_HAS_muluh_i32))
 171# error "Missing unsigned widening multiply"
 172#endif
 173
 174#if !defined(TCG_TARGET_HAS_v64) \
 175    && !defined(TCG_TARGET_HAS_v128) \
 176    && !defined(TCG_TARGET_HAS_v256)
 177#define TCG_TARGET_MAYBE_vec            0
 178#define TCG_TARGET_HAS_neg_vec          0
 179#define TCG_TARGET_HAS_not_vec          0
 180#define TCG_TARGET_HAS_andc_vec         0
 181#define TCG_TARGET_HAS_orc_vec          0
 182#define TCG_TARGET_HAS_shi_vec          0
 183#define TCG_TARGET_HAS_shs_vec          0
 184#define TCG_TARGET_HAS_shv_vec          0
 185#define TCG_TARGET_HAS_mul_vec          0
 186#define TCG_TARGET_HAS_sat_vec          0
 187#define TCG_TARGET_HAS_minmax_vec       0
 188#else
 189#define TCG_TARGET_MAYBE_vec            1
 190#endif
 191#ifndef TCG_TARGET_HAS_v64
 192#define TCG_TARGET_HAS_v64              0
 193#endif
 194#ifndef TCG_TARGET_HAS_v128
 195#define TCG_TARGET_HAS_v128             0
 196#endif
 197#ifndef TCG_TARGET_HAS_v256
 198#define TCG_TARGET_HAS_v256             0
 199#endif
 200
 201#ifndef TARGET_INSN_START_EXTRA_WORDS
 202# define TARGET_INSN_START_WORDS 1
 203#else
 204# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
 205#endif
 206
 207typedef enum TCGOpcode {
 208#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
 209#include "tcg-opc.h"
 210#undef DEF
 211    NB_OPS,
 212} TCGOpcode;
 213
 214#define tcg_regset_set_reg(d, r)   ((d) |= (TCGRegSet)1 << (r))
 215#define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
 216#define tcg_regset_test_reg(d, r)  (((d) >> (r)) & 1)
 217
 218#ifndef TCG_TARGET_INSN_UNIT_SIZE
 219# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
 220#elif TCG_TARGET_INSN_UNIT_SIZE == 1
 221typedef uint8_t tcg_insn_unit;
 222#elif TCG_TARGET_INSN_UNIT_SIZE == 2
 223typedef uint16_t tcg_insn_unit;
 224#elif TCG_TARGET_INSN_UNIT_SIZE == 4
 225typedef uint32_t tcg_insn_unit;
 226#elif TCG_TARGET_INSN_UNIT_SIZE == 8
 227typedef uint64_t tcg_insn_unit;
 228#else
 229/* The port better have done this.  */
 230#endif
 231
 232
 233#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
 234# define tcg_debug_assert(X) do { assert(X); } while (0)
 235#else
 236# define tcg_debug_assert(X) \
 237    do { if (!(X)) { __builtin_unreachable(); } } while (0)
 238#endif
 239
 240typedef struct TCGRelocation {
 241    struct TCGRelocation *next;
 242    int type;
 243    tcg_insn_unit *ptr;
 244    intptr_t addend;
  245} TCGRelocation;
 246
 247typedef struct TCGLabel TCGLabel;
 248struct TCGLabel {
 249    unsigned present : 1;
 250    unsigned has_value : 1;
 251    unsigned id : 14;
 252    unsigned refs : 16;
 253    union {
 254        uintptr_t value;
 255        tcg_insn_unit *value_ptr;
 256        TCGRelocation *first_reloc;
 257    } u;
 258#ifdef CONFIG_DEBUG_TCG
 259    QSIMPLEQ_ENTRY(TCGLabel) next;
 260#endif
 261};
 262
 263typedef struct TCGPool {
 264    struct TCGPool *next;
 265    int size;
 266    uint8_t data[0] __attribute__ ((aligned));
 267} TCGPool;
 268
 269#define TCG_POOL_CHUNK_SIZE 32768
 270
 271#define TCG_MAX_TEMPS 512
 272#define TCG_MAX_INSNS 512
 273
 274/* when the size of the arguments of a called function is smaller than
 275   this value, they are statically allocated in the TB stack frame */
 276#define TCG_STATIC_CALL_ARGS_SIZE 128
 277
 278typedef enum TCGType {
 279    TCG_TYPE_I32,
 280    TCG_TYPE_I64,
 281
 282    TCG_TYPE_V64,
 283    TCG_TYPE_V128,
 284    TCG_TYPE_V256,
 285
 286    TCG_TYPE_COUNT, /* number of different types */
 287
 288    /* An alias for the size of the host register.  */
 289#if TCG_TARGET_REG_BITS == 32
 290    TCG_TYPE_REG = TCG_TYPE_I32,
 291#else
 292    TCG_TYPE_REG = TCG_TYPE_I64,
 293#endif
 294
 295    /* An alias for the size of the native pointer.  */
 296#if UINTPTR_MAX == UINT32_MAX
 297    TCG_TYPE_PTR = TCG_TYPE_I32,
 298#else
 299    TCG_TYPE_PTR = TCG_TYPE_I64,
 300#endif
 301
 302    /* An alias for the size of the target "long", aka register.  */
 303#if TARGET_LONG_BITS == 64
 304    TCG_TYPE_TL = TCG_TYPE_I64,
 305#else
 306    TCG_TYPE_TL = TCG_TYPE_I32,
 307#endif
 308} TCGType;
 309
 310/* Constants for qemu_ld and qemu_st for the Memory Operation field.  */
 311typedef enum TCGMemOp {
 312    MO_8     = 0,
 313    MO_16    = 1,
 314    MO_32    = 2,
 315    MO_64    = 3,
 316    MO_SIZE  = 3,   /* Mask for the above.  */
 317
 318    MO_SIGN  = 4,   /* Sign-extended, otherwise zero-extended.  */
 319
 320    MO_BSWAP = 8,   /* Host reverse endian.  */
 321#ifdef HOST_WORDS_BIGENDIAN
 322    MO_LE    = MO_BSWAP,
 323    MO_BE    = 0,
 324#else
 325    MO_LE    = 0,
 326    MO_BE    = MO_BSWAP,
 327#endif
 328#ifdef TARGET_WORDS_BIGENDIAN
 329    MO_TE    = MO_BE,
 330#else
 331    MO_TE    = MO_LE,
 332#endif
 333
 334    /* MO_UNALN accesses are never checked for alignment.
 335     * MO_ALIGN accesses will result in a call to the CPU's
 336     * do_unaligned_access hook if the guest address is not aligned.
 337     * The default depends on whether the target CPU defines ALIGNED_ONLY.
 338     *
  339     * Some architectures (e.g. ARMv8) need an address that is aligned
  340     * to a size larger than the size of the memory access.
  341     * Some architectures (e.g. SPARCv9) need an address that is aligned,
  342     * but less strictly than the natural alignment.
  343     *
  344     * MO_ALIGN assumes the alignment size is the size of the memory access.
 345     *
 346     * There are three options:
 347     * - unaligned access permitted (MO_UNALN).
 348     * - an alignment to the size of an access (MO_ALIGN);
 349     * - an alignment to a specified size, which may be more or less than
 350     *   the access size (MO_ALIGN_x where 'x' is a size in bytes);
 351     */
 352    MO_ASHIFT = 4,
 353    MO_AMASK = 7 << MO_ASHIFT,
 354#ifdef ALIGNED_ONLY
 355    MO_ALIGN = 0,
 356    MO_UNALN = MO_AMASK,
 357#else
 358    MO_ALIGN = MO_AMASK,
 359    MO_UNALN = 0,
 360#endif
 361    MO_ALIGN_2  = 1 << MO_ASHIFT,
 362    MO_ALIGN_4  = 2 << MO_ASHIFT,
 363    MO_ALIGN_8  = 3 << MO_ASHIFT,
 364    MO_ALIGN_16 = 4 << MO_ASHIFT,
 365    MO_ALIGN_32 = 5 << MO_ASHIFT,
 366    MO_ALIGN_64 = 6 << MO_ASHIFT,
 367
 368    /* Combinations of the above, for ease of use.  */
 369    MO_UB    = MO_8,
 370    MO_UW    = MO_16,
 371    MO_UL    = MO_32,
 372    MO_SB    = MO_SIGN | MO_8,
 373    MO_SW    = MO_SIGN | MO_16,
 374    MO_SL    = MO_SIGN | MO_32,
 375    MO_Q     = MO_64,
 376
 377    MO_LEUW  = MO_LE | MO_UW,
 378    MO_LEUL  = MO_LE | MO_UL,
 379    MO_LESW  = MO_LE | MO_SW,
 380    MO_LESL  = MO_LE | MO_SL,
 381    MO_LEQ   = MO_LE | MO_Q,
 382
 383    MO_BEUW  = MO_BE | MO_UW,
 384    MO_BEUL  = MO_BE | MO_UL,
 385    MO_BESW  = MO_BE | MO_SW,
 386    MO_BESL  = MO_BE | MO_SL,
 387    MO_BEQ   = MO_BE | MO_Q,
 388
 389    MO_TEUW  = MO_TE | MO_UW,
 390    MO_TEUL  = MO_TE | MO_UL,
 391    MO_TESW  = MO_TE | MO_SW,
 392    MO_TESL  = MO_TE | MO_SL,
 393    MO_TEQ   = MO_TE | MO_Q,
 394
 395    MO_SSIZE = MO_SIZE | MO_SIGN,
 396} TCGMemOp;
 397
 398/**
 399 * get_alignment_bits
 400 * @memop: TCGMemOp value
 401 *
 402 * Extract the alignment size from the memop.
 403 */
 404static inline unsigned get_alignment_bits(TCGMemOp memop)
 405{
 406    unsigned a = memop & MO_AMASK;
 407
 408    if (a == MO_UNALN) {
 409        /* No alignment required.  */
 410        a = 0;
 411    } else if (a == MO_ALIGN) {
 412        /* A natural alignment requirement.  */
 413        a = memop & MO_SIZE;
 414    } else {
 415        /* A specific alignment requirement.  */
 416        a = a >> MO_ASHIFT;
 417    }
 418#if defined(CONFIG_SOFTMMU)
 419    /* The requested alignment cannot overlap the TLB flags.  */
 420    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
 421#endif
 422    return a;
 423}
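/*
 * For illustration (a sketch, assuming the target does not define
 * ALIGNED_ONLY, so MO_UNALN == 0 and MO_ALIGN == MO_AMASK):
 *
 *     get_alignment_bits(MO_TEUL)              == 0   -- no alignment check
 *     get_alignment_bits(MO_TEUL | MO_ALIGN)   == 2   -- natural, log2(4)
 *     get_alignment_bits(MO_TEUW | MO_ALIGN_8) == 3   -- explicit, log2(8)
 */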
 424
 425typedef tcg_target_ulong TCGArg;
 426
 427/* Define type and accessor macros for TCG variables.
 428
 429   TCG variables are the inputs and outputs of TCG ops, as described
 430   in tcg/README. Target CPU front-end code uses these types to deal
 431   with TCG variables as it emits TCG code via the tcg_gen_* functions.
 432   They come in several flavours:
 433    * TCGv_i32 : 32 bit integer type
 434    * TCGv_i64 : 64 bit integer type
 435    * TCGv_ptr : a host pointer type
 436    * TCGv_vec : a host vector type; the exact size is not exposed
 437                 to the CPU front-end code.
 438    * TCGv : an integer type the same size as target_ulong
 439             (an alias for either TCGv_i32 or TCGv_i64)
 440   The compiler's type checking will complain if you mix them
 441   up and pass the wrong sized TCGv to a function.
 442
 443   Users of tcg_gen_* don't need to know about any of the internal
 444   details of these, and should treat them as opaque types.
 445   You won't be able to look inside them in a debugger either.
 446
 447   Internal implementation details follow:
 448
 449   Note that there is no definition of the structs TCGv_i32_d etc anywhere.
 450   This is deliberate, because the values we store in variables of type
 451   TCGv_i32 are not really pointers-to-structures. They're just small
 452   integers, but keeping them in pointer types like this means that the
 453   compiler will complain if you accidentally pass a TCGv_i32 to a
 454   function which takes a TCGv_i64, and so on. Only the internals of
 455   TCG need to care about the actual contents of the types.  */
 456
 457typedef struct TCGv_i32_d *TCGv_i32;
 458typedef struct TCGv_i64_d *TCGv_i64;
 459typedef struct TCGv_ptr_d *TCGv_ptr;
 460typedef struct TCGv_vec_d *TCGv_vec;
 461typedef TCGv_ptr TCGv_env;
 462#if TARGET_LONG_BITS == 32
 463#define TCGv TCGv_i32
 464#elif TARGET_LONG_BITS == 64
 465#define TCGv TCGv_i64
 466#else
 467#error Unhandled TARGET_LONG_BITS value
 468#endif
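/*
 * For illustration, a front-end sketch (assumes the tcg_gen_* emitters and
 * tcg_temp_new()/tcg_temp_free() from tcg-op.h; "cpu_pc" is a hypothetical
 * TCGv global):
 *
 *     TCGv t = tcg_temp_new();            -- TCGv_i32 or TCGv_i64, per target
 *     tcg_gen_addi_tl(t, cpu_pc, 4);      -- OK: all operands are TCGv
 *     tcg_gen_mov_i32(t, cpu_pc);         -- compile error on 64-bit targets
 *     tcg_temp_free(t);
 */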
 469
 470/* call flags */
 471/* Helper does not read globals (either directly or through an exception). It
 472   implies TCG_CALL_NO_WRITE_GLOBALS. */
 473#define TCG_CALL_NO_READ_GLOBALS    0x0001
 474/* Helper does not write globals */
 475#define TCG_CALL_NO_WRITE_GLOBALS   0x0002
 476/* Helper can be safely suppressed if the return value is not used. */
 477#define TCG_CALL_NO_SIDE_EFFECTS    0x0004
 478/* Helper is QEMU_NORETURN.  */
 479#define TCG_CALL_NO_RETURN          0x0008
 480
 481/* convenience version of most used call flags */
 482#define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
 483#define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
 484#define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
 485#define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
 486#define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)
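/*
 * For illustration (a sketch; DEF_HELPER_FLAGS_* comes from the helper
 * declaration machinery in exec/helper-head.h, and "ctz32" is a hypothetical
 * helper name):
 *
 *     DEF_HELPER_FLAGS_1(ctz32, TCG_CALL_NO_RWG_SE, i32, i32)
 *
 * A pure helper declared this way neither reads nor writes globals and may
 * be dropped entirely when its result is unused.
 */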
 487
 488/* Used to align parameters.  See the comment before tcgv_i32_temp.  */
 489#define TCG_CALL_DUMMY_ARG      ((TCGArg)0)
 490
 491/* Conditions.  Note that these are laid out for easy manipulation by
 492   the functions below:
 493     bit 0 is used for inverting;
 494     bit 1 is signed,
 495     bit 2 is unsigned,
 496     bit 3 is used with bit 0 for swapping signed/unsigned.  */
 497typedef enum {
 498    /* non-signed */
 499    TCG_COND_NEVER  = 0 | 0 | 0 | 0,
 500    TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
 501    TCG_COND_EQ     = 8 | 0 | 0 | 0,
 502    TCG_COND_NE     = 8 | 0 | 0 | 1,
 503    /* signed */
 504    TCG_COND_LT     = 0 | 0 | 2 | 0,
 505    TCG_COND_GE     = 0 | 0 | 2 | 1,
 506    TCG_COND_LE     = 8 | 0 | 2 | 0,
 507    TCG_COND_GT     = 8 | 0 | 2 | 1,
 508    /* unsigned */
 509    TCG_COND_LTU    = 0 | 4 | 0 | 0,
 510    TCG_COND_GEU    = 0 | 4 | 0 | 1,
 511    TCG_COND_LEU    = 8 | 4 | 0 | 0,
 512    TCG_COND_GTU    = 8 | 4 | 0 | 1,
 513} TCGCond;
 514
 515/* Invert the sense of the comparison.  */
 516static inline TCGCond tcg_invert_cond(TCGCond c)
 517{
 518    return (TCGCond)(c ^ 1);
 519}
 520
 521/* Swap the operands in a comparison.  */
 522static inline TCGCond tcg_swap_cond(TCGCond c)
 523{
 524    return c & 6 ? (TCGCond)(c ^ 9) : c;
 525}
 526
 527/* Create an "unsigned" version of a "signed" comparison.  */
 528static inline TCGCond tcg_unsigned_cond(TCGCond c)
 529{
 530    return c & 2 ? (TCGCond)(c ^ 6) : c;
 531}
 532
 533/* Create a "signed" version of an "unsigned" comparison.  */
 534static inline TCGCond tcg_signed_cond(TCGCond c)
 535{
 536    return c & 4 ? (TCGCond)(c ^ 6) : c;
 537}
 538
 539/* Must a comparison be considered unsigned?  */
 540static inline bool is_unsigned_cond(TCGCond c)
 541{
 542    return (c & 4) != 0;
 543}
 544
 545/* Create a "high" version of a double-word comparison.
 546   This removes equality from a LTE or GTE comparison.  */
 547static inline TCGCond tcg_high_cond(TCGCond c)
 548{
 549    switch (c) {
 550    case TCG_COND_GE:
 551    case TCG_COND_LE:
 552    case TCG_COND_GEU:
 553    case TCG_COND_LEU:
 554        return (TCGCond)(c ^ 8);
 555    default:
 556        return c;
 557    }
 558}
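/*
 * For illustration, some identities implied by the encoding above:
 *
 *     tcg_invert_cond(TCG_COND_LT)   == TCG_COND_GE
 *     tcg_swap_cond(TCG_COND_LT)     == TCG_COND_GT
 *     tcg_unsigned_cond(TCG_COND_LT) == TCG_COND_LTU
 *     tcg_signed_cond(TCG_COND_LTU)  == TCG_COND_LT
 *     tcg_high_cond(TCG_COND_LE)     == TCG_COND_LT
 *     is_unsigned_cond(TCG_COND_GEU) == true
 */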
 559
 560typedef enum TCGTempVal {
 561    TEMP_VAL_DEAD,
 562    TEMP_VAL_REG,
 563    TEMP_VAL_MEM,
 564    TEMP_VAL_CONST,
 565} TCGTempVal;
 566
 567typedef struct TCGTemp {
 568    TCGReg reg:8;
 569    TCGTempVal val_type:8;
 570    TCGType base_type:8;
 571    TCGType type:8;
 572    unsigned int fixed_reg:1;
 573    unsigned int indirect_reg:1;
 574    unsigned int indirect_base:1;
 575    unsigned int mem_coherent:1;
 576    unsigned int mem_allocated:1;
 577    /* If true, the temp is saved across both basic blocks and
 578       translation blocks.  */
 579    unsigned int temp_global:1;
 580    /* If true, the temp is saved across basic blocks but dead
 581       at the end of translation blocks.  If false, the temp is
 582       dead at the end of basic blocks.  */
 583    unsigned int temp_local:1;
 584    unsigned int temp_allocated:1;
 585
 586    tcg_target_long val;
 587    struct TCGTemp *mem_base;
 588    intptr_t mem_offset;
 589    const char *name;
 590
 591    /* Pass-specific information that can be stored for a temporary.
 592       One word worth of integer data, and one pointer to data
 593       allocated separately.  */
 594    uintptr_t state;
 595    void *state_ptr;
 596} TCGTemp;
 597
 598typedef struct TCGContext TCGContext;
 599
 600typedef struct TCGTempSet {
 601    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
 602} TCGTempSet;
 603
 604/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
  605   this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
 606   There are never more than 2 outputs, which means that we can store all
 607   dead + sync data within 16 bits.  */
 608#define DEAD_ARG  4
 609#define SYNC_ARG  1
 610typedef uint16_t TCGLifeData;
 611
 612/* The layout here is designed to avoid a bitfield crossing of
 613   a 32-bit boundary, which would cause GCC to add extra padding.  */
 614typedef struct TCGOp {
 615    TCGOpcode opc   : 8;        /*  8 */
 616
 617    /* Parameters for this opcode.  See below.  */
 618    unsigned param1 : 4;        /* 12 */
 619    unsigned param2 : 4;        /* 16 */
 620
 621    /* Lifetime data of the operands.  */
 622    unsigned life   : 16;       /* 32 */
 623
 624    /* Next and previous opcodes.  */
 625    QTAILQ_ENTRY(TCGOp) link;
 626
 627    /* Arguments for the opcode.  */
 628    TCGArg args[MAX_OPC_PARAM];
 629
 630    /* Register preferences for the output(s).  */
 631    TCGRegSet output_pref[2];
 632} TCGOp;
 633
 634#define TCGOP_CALLI(X)    (X)->param1
 635#define TCGOP_CALLO(X)    (X)->param2
 636
 637#define TCGOP_VECL(X)     (X)->param1
 638#define TCGOP_VECE(X)     (X)->param2
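/*
 * For illustration: param1/param2 are overloaded by opcode class.  A call op
 * records its output/input argument counts in TCGOP_CALLO/TCGOP_CALLI, while
 * the vector front end stores the vector type relative to TCG_TYPE_V64 in
 * TCGOP_VECL and the MO_* log2 element size in TCGOP_VECE.  E.g. for a
 * 128-bit operation on 32-bit lanes (a sketch):
 *
 *     TCGOP_VECL(op) == TCG_TYPE_V128 - TCG_TYPE_V64   -- 1
 *     TCGOP_VECE(op) == MO_32                          -- 2
 */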
 639
 640/* Make sure operands fit in the bitfields above.  */
 641QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
 642
 643typedef struct TCGProfile {
 644    int64_t cpu_exec_time;
 645    int64_t tb_count1;
 646    int64_t tb_count;
 647    int64_t op_count; /* total insn count */
 648    int op_count_max; /* max insn per TB */
 649    int temp_count_max;
 650    int64_t temp_count;
 651    int64_t del_op_count;
 652    int64_t code_in_len;
 653    int64_t code_out_len;
 654    int64_t search_out_len;
 655    int64_t interm_time;
 656    int64_t code_time;
 657    int64_t la_time;
 658    int64_t opt_time;
 659    int64_t restore_count;
 660    int64_t restore_time;
 661    int64_t table_op_count[NB_OPS];
 662} TCGProfile;
 663
 664struct TCGContext {
 665    uint8_t *pool_cur, *pool_end;
 666    TCGPool *pool_first, *pool_current, *pool_first_large;
 667    int nb_labels;
 668    int nb_globals;
 669    int nb_temps;
 670    int nb_indirects;
 671    int nb_ops;
 672
 673    /* goto_tb support */
 674    tcg_insn_unit *code_buf;
 675    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
 676    uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
 677    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */
 678
 679    TCGRegSet reserved_regs;
 680    uint32_t tb_cflags; /* cflags of the current TB */
 681    intptr_t current_frame_offset;
 682    intptr_t frame_start;
 683    intptr_t frame_end;
 684    TCGTemp *frame_temp;
 685
 686    tcg_insn_unit *code_ptr;
 687
 688#ifdef CONFIG_PROFILER
 689    TCGProfile prof;
 690#endif
 691
 692#ifdef CONFIG_DEBUG_TCG
 693    QSIMPLEQ_HEAD(, TCGLabel) labels;
 694    int temps_in_use;
 695    int goto_tb_issue_mask;
 696#endif
 697
 698    /* Code generation.  Note that we specifically do not use tcg_insn_unit
 699       here, because there's too much arithmetic throughout that relies
 700       on addition and subtraction working on bytes.  Rely on the GCC
 701       extension that allows arithmetic on void*.  */
 702    void *code_gen_prologue;
 703    void *code_gen_epilogue;
 704    void *code_gen_buffer;
 705    size_t code_gen_buffer_size;
 706    void *code_gen_ptr;
 707    void *data_gen_ptr;
 708
 709    /* Threshold to flush the translated code buffer.  */
 710    void *code_gen_highwater;
 711
 712    size_t tb_phys_invalidate_count;
 713
 714    /* Track which vCPU triggers events */
 715    CPUState *cpu;                      /* *_trans */
 716
 717    /* These structures are private to tcg-target.inc.c.  */
 718#ifdef TCG_TARGET_NEED_LDST_LABELS
 719    QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels;
 720#endif
 721#ifdef TCG_TARGET_NEED_POOL_LABELS
 722    struct TCGLabelPoolData *pool_labels;
 723#endif
 724
 725    TCGLabel *exitreq_label;
 726
 727    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
 728    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
 729
 730    QTAILQ_HEAD(, TCGOp) ops, free_ops;
 731
 732    /* Tells which temporary holds a given register.
 733       It does not take into account fixed registers */
 734    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];
 735
 736    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
 737    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
 738};
 739
 740extern TCGContext tcg_init_ctx;
 741extern __thread TCGContext *tcg_ctx;
 742extern TCGv_env cpu_env;
 743
 744static inline size_t temp_idx(TCGTemp *ts)
 745{
 746    ptrdiff_t n = ts - tcg_ctx->temps;
 747    tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
 748    return n;
 749}
 750
 751static inline TCGArg temp_arg(TCGTemp *ts)
 752{
 753    return (uintptr_t)ts;
 754}
 755
 756static inline TCGTemp *arg_temp(TCGArg a)
 757{
 758    return (TCGTemp *)(uintptr_t)a;
 759}
 760
 761/* Using the offset of a temporary, relative to TCGContext, rather than
 762   its index means that we don't use 0.  That leaves offset 0 free for
 763   a NULL representation without having to leave index 0 unused.  */
 764static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
 765{
 766    uintptr_t o = (uintptr_t)v;
 767    TCGTemp *t = (void *)tcg_ctx + o;
 768    tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
 769    return t;
 770}
 771
 772static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
 773{
 774    return tcgv_i32_temp((TCGv_i32)v);
 775}
 776
 777static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
 778{
 779    return tcgv_i32_temp((TCGv_i32)v);
 780}
 781
 782static inline TCGTemp *tcgv_vec_temp(TCGv_vec v)
 783{
 784    return tcgv_i32_temp((TCGv_i32)v);
 785}
 786
 787static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
 788{
 789    return temp_arg(tcgv_i32_temp(v));
 790}
 791
 792static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
 793{
 794    return temp_arg(tcgv_i64_temp(v));
 795}
 796
 797static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
 798{
 799    return temp_arg(tcgv_ptr_temp(v));
 800}
 801
 802static inline TCGArg tcgv_vec_arg(TCGv_vec v)
 803{
 804    return temp_arg(tcgv_vec_temp(v));
 805}
 806
 807static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
 808{
 809    (void)temp_idx(t); /* trigger embedded assert */
 810    return (TCGv_i32)((void *)t - (void *)tcg_ctx);
 811}
 812
 813static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
 814{
 815    return (TCGv_i64)temp_tcgv_i32(t);
 816}
 817
 818static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
 819{
 820    return (TCGv_ptr)temp_tcgv_i32(t);
 821}
 822
 823static inline TCGv_vec temp_tcgv_vec(TCGTemp *t)
 824{
 825    return (TCGv_vec)temp_tcgv_i32(t);
 826}
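/*
 * For illustration: the conversions above are inverses built on the temp's
 * byte offset within tcg_ctx, so (a sketch):
 *
 *     TCGv_i32 v = ...;
 *     temp_tcgv_i32(tcgv_i32_temp(v)) == v
 *     tcgv_i32_arg(v) == temp_arg(tcgv_i32_temp(v))
 */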
 827
 828#if TCG_TARGET_REG_BITS == 32
 829static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
 830{
 831    return temp_tcgv_i32(tcgv_i64_temp(t));
 832}
 833
 834static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
 835{
 836    return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
 837}
 838#endif
 839
 840static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
 841{
 842    op->args[arg] = v;
 843}
 844
 845static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v)
 846{
 847#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
 848    tcg_set_insn_param(op, arg, v);
 849#else
 850    tcg_set_insn_param(op, arg * 2, v);
 851    tcg_set_insn_param(op, arg * 2 + 1, v >> 32);
 852#endif
 853}
 854
 855/* The last op that was emitted.  */
 856static inline TCGOp *tcg_last_op(void)
 857{
 858    return QTAILQ_LAST(&tcg_ctx->ops);
 859}
 860
 861/* Test for whether to terminate the TB for using too many opcodes.  */
 862static inline bool tcg_op_buf_full(void)
 863{
 864    /* This is not a hard limit, it merely stops translation when
 865     * we have produced "enough" opcodes.  We want to limit TB size
 866     * such that a RISC host can reasonably use a 16-bit signed
 867     * branch within the TB.  We also need to be mindful of the
 868     * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[]
 869     * and TCGContext.gen_insn_end_off[].
 870     */
 871    return tcg_ctx->nb_ops >= 4000;
 872}
 873
 874/* pool based memory allocation */
 875
 876/* user-mode: mmap_lock must be held for tcg_malloc_internal. */
 877void *tcg_malloc_internal(TCGContext *s, int size);
 878void tcg_pool_reset(TCGContext *s);
 879TranslationBlock *tcg_tb_alloc(TCGContext *s);
 880
 881void tcg_region_init(void);
 882void tcg_region_reset_all(void);
 883
 884size_t tcg_code_size(void);
 885size_t tcg_code_capacity(void);
 886
 887void tcg_tb_insert(TranslationBlock *tb);
 888void tcg_tb_remove(TranslationBlock *tb);
 889size_t tcg_tb_phys_invalidate_count(void);
 890TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
 891void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
 892size_t tcg_nb_tbs(void);
 893
 894/* user-mode: Called with mmap_lock held.  */
 895static inline void *tcg_malloc(int size)
 896{
 897    TCGContext *s = tcg_ctx;
 898    uint8_t *ptr, *ptr_end;
 899
 900    /* ??? This is a weak placeholder for minimum malloc alignment.  */
 901    size = QEMU_ALIGN_UP(size, 8);
 902
 903    ptr = s->pool_cur;
 904    ptr_end = ptr + size;
 905    if (unlikely(ptr_end > s->pool_end)) {
 906        return tcg_malloc_internal(tcg_ctx, size);
 907    } else {
 908        s->pool_cur = ptr_end;
 909        return ptr;
 910    }
 911}
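/*
 * For illustration (a sketch): pool memory is bump-allocated and reclaimed
 * en masse by tcg_pool_reset(), so it suits per-translation scratch data
 * that is never freed individually:
 *
 *     TCGArg *scratch = tcg_malloc(n * sizeof(TCGArg));   -- no matching free
 */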
 912
 913void tcg_context_init(TCGContext *s);
 914void tcg_register_thread(void);
 915void tcg_prologue_init(TCGContext *s);
 916void tcg_func_start(TCGContext *s);
 917
 918int tcg_gen_code(TCGContext *s, TranslationBlock *tb);
 919
 920void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
 921
 922TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
 923                                     intptr_t, const char *);
 924TCGTemp *tcg_temp_new_internal(TCGType, bool);
 925void tcg_temp_free_internal(TCGTemp *);
 926TCGv_vec tcg_temp_new_vec(TCGType type);
 927TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);
 928
 929static inline void tcg_temp_free_i32(TCGv_i32 arg)
 930{
 931    tcg_temp_free_internal(tcgv_i32_temp(arg));
 932}
 933
 934static inline void tcg_temp_free_i64(TCGv_i64 arg)
 935{
 936    tcg_temp_free_internal(tcgv_i64_temp(arg));
 937}
 938
 939static inline void tcg_temp_free_ptr(TCGv_ptr arg)
 940{
 941    tcg_temp_free_internal(tcgv_ptr_temp(arg));
 942}
 943
 944static inline void tcg_temp_free_vec(TCGv_vec arg)
 945{
 946    tcg_temp_free_internal(tcgv_vec_temp(arg));
 947}
 948
 949static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
 950                                              const char *name)
 951{
 952    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
 953    return temp_tcgv_i32(t);
 954}
 955
 956static inline TCGv_i32 tcg_temp_new_i32(void)
 957{
 958    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, false);
 959    return temp_tcgv_i32(t);
 960}
 961
 962static inline TCGv_i32 tcg_temp_local_new_i32(void)
 963{
 964    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, true);
 965    return temp_tcgv_i32(t);
 966}
 967
 968static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
 969                                              const char *name)
 970{
 971    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
 972    return temp_tcgv_i64(t);
 973}
 974
 975static inline TCGv_i64 tcg_temp_new_i64(void)
 976{
 977    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, false);
 978    return temp_tcgv_i64(t);
 979}
 980
 981static inline TCGv_i64 tcg_temp_local_new_i64(void)
 982{
 983    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, true);
 984    return temp_tcgv_i64(t);
 985}
 986
 987static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset,
 988                                              const char *name)
 989{
 990    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_PTR, reg, offset, name);
 991    return temp_tcgv_ptr(t);
 992}
 993
 994static inline TCGv_ptr tcg_temp_new_ptr(void)
 995{
 996    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, false);
 997    return temp_tcgv_ptr(t);
 998}
 999
1000static inline TCGv_ptr tcg_temp_local_new_ptr(void)
1001{
1002    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, true);
1003    return temp_tcgv_ptr(t);
1004}
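/*
 * For illustration, a typical allocate/use/free pairing in a front end
 * (a sketch; tcg_gen_addi_i32 comes from tcg-op.h, "src" is hypothetical):
 *
 *     TCGv_i32 tmp = tcg_temp_new_i32();
 *     tcg_gen_addi_i32(tmp, src, 1);
 *     ... further uses of tmp ...
 *     tcg_temp_free_i32(tmp);
 *
 * Plain temporaries are dead at the end of a basic block; use the
 * *_local_new_* variants for values that must survive a branch within the
 * same TB.
 */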
1005
1006#if defined(CONFIG_DEBUG_TCG)
1007/* If you call tcg_clear_temp_count() at the start of a section of
1008 * code which is not supposed to leak any TCG temporaries, then
1009 * calling tcg_check_temp_count() at the end of the section will
1010 * return 1 if the section did in fact leak a temporary.
1011 */
1012void tcg_clear_temp_count(void);
1013int tcg_check_temp_count(void);
1014#else
1015#define tcg_clear_temp_count() do { } while (0)
1016#define tcg_check_temp_count() 0
1017#endif
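/*
 * For illustration (a sketch), wrapped around the translation of a single
 * guest instruction ("translate_one_insn" is hypothetical):
 *
 *     tcg_clear_temp_count();
 *     translate_one_insn(env, ctx);
 *     if (tcg_check_temp_count()) {
 *         -- a TCG temporary was leaked by the insn above
 *     }
 */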
1018
1019int64_t tcg_cpu_exec_time(void);
1020void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf);
1021void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf);
1022
1023#define TCG_CT_ALIAS  0x80
1024#define TCG_CT_IALIAS 0x40
1025#define TCG_CT_NEWREG 0x20 /* output requires a new register */
1026#define TCG_CT_REG    0x01
1027#define TCG_CT_CONST  0x02 /* any constant of register size */
1028
1029typedef struct TCGArgConstraint {
1030    uint16_t ct;
1031    uint8_t alias_index;
1032    union {
1033        TCGRegSet regs;
1034    } u;
1035} TCGArgConstraint;
1036
1037#define TCG_MAX_OP_ARGS 16
1038
1039/* Bits for TCGOpDef->flags, 8 bits available.  */
1040enum {
1041    /* Instruction exits the translation block.  */
1042    TCG_OPF_BB_EXIT      = 0x01,
1043    /* Instruction defines the end of a basic block.  */
1044    TCG_OPF_BB_END       = 0x02,
 1045    /* Instruction clobbers call registers and potentially updates globals.  */
1046    TCG_OPF_CALL_CLOBBER = 0x04,
1047    /* Instruction has side effects: it cannot be removed if its outputs
1048       are not used, and might trigger exceptions.  */
1049    TCG_OPF_SIDE_EFFECTS = 0x08,
1050    /* Instruction operands are 64-bits (otherwise 32-bits).  */
1051    TCG_OPF_64BIT        = 0x10,
1052    /* Instruction is optional and not implemented by the host, or insn
 1053       is generic and should not be implemented by the host.  */
1054    TCG_OPF_NOT_PRESENT  = 0x20,
1055    /* Instruction operands are vectors.  */
1056    TCG_OPF_VECTOR       = 0x40,
1057};
1058
1059typedef struct TCGOpDef {
1060    const char *name;
1061    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
1062    uint8_t flags;
1063    TCGArgConstraint *args_ct;
1064    int *sorted_args;
1065#if defined(CONFIG_DEBUG_TCG)
1066    int used;
1067#endif
1068} TCGOpDef;
1069
1070extern TCGOpDef tcg_op_defs[];
1071extern const size_t tcg_op_defs_max;
1072
1073typedef struct TCGTargetOpDef {
1074    TCGOpcode op;
1075    const char *args_ct_str[TCG_MAX_OP_ARGS];
1076} TCGTargetOpDef;
1077
1078#define tcg_abort() \
1079do {\
1080    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
1081    abort();\
1082} while (0)
1083
1084bool tcg_op_supported(TCGOpcode op);
1085
1086void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);
1087
1088TCGOp *tcg_emit_op(TCGOpcode opc);
1089void tcg_op_remove(TCGContext *s, TCGOp *op);
1090TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc);
1091TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc);
1092
1093void tcg_optimize(TCGContext *s);
1094
1095TCGv_i32 tcg_const_i32(int32_t val);
1096TCGv_i64 tcg_const_i64(int64_t val);
1097TCGv_i32 tcg_const_local_i32(int32_t val);
1098TCGv_i64 tcg_const_local_i64(int64_t val);
1099TCGv_vec tcg_const_zeros_vec(TCGType);
1100TCGv_vec tcg_const_ones_vec(TCGType);
1101TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
1102TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);
1103
1104#if UINTPTR_MAX == UINT32_MAX
1105# define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
1106# define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x)))
1107#else
1108# define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i64((intptr_t)(x)))
1109# define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i64((intptr_t)(x)))
1110#endif
1111
1112TCGLabel *gen_new_label(void);
1113
1114/**
1115 * label_arg
1116 * @l: label
1117 *
1118 * Encode a label for storage in the TCG opcode stream.
1119 */
1120
1121static inline TCGArg label_arg(TCGLabel *l)
1122{
1123    return (uintptr_t)l;
1124}
1125
1126/**
1127 * arg_label
1128 * @i: value
1129 *
1130 * The opposite of label_arg.  Retrieve a label from the
1131 * encoding of the TCG opcode stream.
1132 */
1133
1134static inline TCGLabel *arg_label(TCGArg i)
1135{
1136    return (TCGLabel *)(uintptr_t)i;
1137}
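/*
 * For illustration (a sketch; gen_new_label is declared above, while
 * gen_set_label and tcg_gen_brcondi_i32 come from tcg-op.h):
 *
 *     TCGLabel *skip = gen_new_label();
 *     tcg_gen_brcondi_i32(TCG_COND_EQ, val, 0, skip);
 *     ... code executed only when val != 0 ...
 *     gen_set_label(skip);
 *
 * The branch op stores label_arg(skip) in its argument list; the backend
 * later recovers the label with arg_label().
 */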
1138
1139/**
1140 * tcg_ptr_byte_diff
1141 * @a, @b: addresses to be differenced
1142 *
1143 * There are many places within the TCG backends where we need a byte
1144 * difference between two pointers.  While this can be accomplished
1145 * with local casting, it's easy to get wrong -- especially if one is
1146 * concerned with the signedness of the result.
1147 *
1148 * This version relies on GCC's void pointer arithmetic to get the
1149 * correct result.
1150 */
1151
1152static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
1153{
1154    return a - b;
1155}
1156
1157/**
1158 * tcg_pcrel_diff
1159 * @s: the tcg context
1160 * @target: address of the target
1161 *
1162 * Produce a pc-relative difference, from the current code_ptr
1163 * to the destination address.
1164 */
1165
1166static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
1167{
1168    return tcg_ptr_byte_diff(target, s->code_ptr);
1169}
1170
1171/**
1172 * tcg_current_code_size
1173 * @s: the tcg context
1174 *
1175 * Compute the current code size within the translation block.
1176 * This is used to fill in qemu's data structures for goto_tb.
1177 */
1178
1179static inline size_t tcg_current_code_size(TCGContext *s)
1180{
1181    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
1182}
1183
1184/* Combine the TCGMemOp and mmu_idx parameters into a single value.  */
1185typedef uint32_t TCGMemOpIdx;
1186
1187/**
1188 * make_memop_idx
1189 * @op: memory operation
1190 * @idx: mmu index
1191 *
1192 * Encode these values into a single parameter.
1193 */
1194static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
1195{
1196    tcg_debug_assert(idx <= 15);
1197    return (op << 4) | idx;
1198}
1199
1200/**
1201 * get_memop
1202 * @oi: combined op/idx parameter
1203 *
1204 * Extract the memory operation from the combined value.
1205 */
1206static inline TCGMemOp get_memop(TCGMemOpIdx oi)
1207{
1208    return oi >> 4;
1209}
1210
1211/**
1212 * get_mmuidx
1213 * @oi: combined op/idx parameter
1214 *
1215 * Extract the mmu index from the combined value.
1216 */
1217static inline unsigned get_mmuidx(TCGMemOpIdx oi)
1218{
1219    return oi & 15;
1220}
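/*
 * For illustration: the two halves round-trip, e.g.
 *
 *     TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, 3);
 *     get_memop(oi)  == (MO_TEUL | MO_ALIGN)
 *     get_mmuidx(oi) == 3
 */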
1221
1222/**
1223 * tcg_qemu_tb_exec:
1224 * @env: pointer to CPUArchState for the CPU
1225 * @tb_ptr: address of generated code for the TB to execute
1226 *
1227 * Start executing code from a given translation block.
1228 * Where translation blocks have been linked, execution
1229 * may proceed from the given TB into successive ones.
1230 * Control eventually returns only when some action is needed
1231 * from the top-level loop: either control must pass to a TB
1232 * which has not yet been directly linked, or an asynchronous
1233 * event such as an interrupt needs handling.
1234 *
1235 * Return: The return value is the value passed to the corresponding
1236 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
1237 * The value is either zero or a 4-byte aligned pointer to that TB combined
1238 * with additional information in its two least significant bits. The
1239 * additional information is encoded as follows:
1240 *  0, 1: the link between this TB and the next is via the specified
1241 *        TB index (0 or 1). That is, we left the TB via (the equivalent
1242 *        of) "goto_tb <index>". The main loop uses this to determine
1243 *        how to link the TB just executed to the next.
1244 *  2:    we are using instruction counting code generation, and we
1245 *        did not start executing this TB because the instruction counter
1246 *        would hit zero midway through it. In this case the pointer
1247 *        returned is the TB we were about to execute, and the caller must
1248 *        arrange to execute the remaining count of instructions.
1249 *  3:    we stopped because the CPU's exit_request flag was set
1250 *        (usually meaning that there is an interrupt that needs to be
1251 *        handled). The pointer returned is the TB we were about to execute
1252 *        when we noticed the pending exit request.
1253 *
1254 * If the bottom two bits indicate an exit-via-index then the CPU
1255 * state is correctly synchronised and ready for execution of the next
1256 * TB (and in particular the guest PC is the address to execute next).
1257 * Otherwise, we gave up on execution of this TB before it started, and
1258 * the caller must fix up the CPU state by calling the CPU's
1259 * synchronize_from_tb() method with the TB pointer we return (falling
 1260 * back to calling the CPU's set_pc method with tb->pc if no
1261 * synchronize_from_tb() method exists).
1262 *
1263 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
1264 * to this default (which just calls the prologue.code emitted by
1265 * tcg_target_qemu_prologue()).
1266 */
1267#define TB_EXIT_MASK      3
1268#define TB_EXIT_IDX0      0
1269#define TB_EXIT_IDX1      1
1270#define TB_EXIT_IDXMAX    1
1271#define TB_EXIT_REQUESTED 3
1272
1273#ifdef HAVE_TCG_QEMU_TB_EXEC
1274uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
1275#else
1276# define tcg_qemu_tb_exec(env, tb_ptr) \
1277    ((uintptr_t (*)(void *, void *))tcg_ctx->code_gen_prologue)(env, tb_ptr)
1278#endif
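/*
 * For illustration, the caller decodes the return value roughly like this
 * (a sketch of what the main execution loop does):
 *
 *     uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
 *     TranslationBlock *last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
 *     int tb_exit = ret & TB_EXIT_MASK;
 *     if (tb_exit <= TB_EXIT_IDXMAX) {
 *         ... link last_tb's jump slot tb_exit to the next TB ...
 *     } else {
 *         ... synchronize CPU state as described above ...
 *     }
 */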
1279
1280void tcg_register_jit(void *buf, size_t buf_size);
1281
1282#if TCG_TARGET_MAYBE_vec
1283/* Return zero if the tuple (opc, type, vece) is unsupportable;
1284   return > 0 if it is directly supportable;
1285   return < 0 if we must call tcg_expand_vec_op.  */
1286int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned);
1287#else
1288static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
1289{
1290    return 0;
1291}
1292#endif
1293
1294/* Expand the tuple (opc, type, vece) on the given arguments.  */
1295void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);
1296
 1297/* Replicate a constant C according to the log2 of the element size.  */
1298uint64_t dup_const(unsigned vece, uint64_t c);
1299
1300#define dup_const(VECE, C)                                         \
1301    (__builtin_constant_p(VECE)                                    \
1302     ? (  (VECE) == MO_8  ? 0x0101010101010101ull * (uint8_t)(C)   \
1303        : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C)  \
1304        : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C)  \
1305        : dup_const(VECE, C))                                      \
1306     : dup_const(VECE, C))
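/*
 * For illustration:
 *
 *     dup_const(MO_8,  0xab)   == 0xababababababababull
 *     dup_const(MO_16, 0x1234) == 0x1234123412341234ull
 */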
1307
1308
1309/*
1310 * Memory helpers that will be used by TCG generated code.
1311 */
1312#ifdef CONFIG_SOFTMMU
1313/* Value zero-extended to tcg register size.  */
1314tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
1315                                     TCGMemOpIdx oi, uintptr_t retaddr);
1316tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1317                                    TCGMemOpIdx oi, uintptr_t retaddr);
1318tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1319                                    TCGMemOpIdx oi, uintptr_t retaddr);
1320uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
1321                           TCGMemOpIdx oi, uintptr_t retaddr);
1322tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1323                                    TCGMemOpIdx oi, uintptr_t retaddr);
1324tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1325                                    TCGMemOpIdx oi, uintptr_t retaddr);
1326uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
1327                           TCGMemOpIdx oi, uintptr_t retaddr);
1328
1329/* Value sign-extended to tcg register size.  */
1330tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1331                                     TCGMemOpIdx oi, uintptr_t retaddr);
1332tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1333                                    TCGMemOpIdx oi, uintptr_t retaddr);
1334tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1335                                    TCGMemOpIdx oi, uintptr_t retaddr);
1336tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1337                                    TCGMemOpIdx oi, uintptr_t retaddr);
1338tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1339                                    TCGMemOpIdx oi, uintptr_t retaddr);
1340
1341void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
1342                        TCGMemOpIdx oi, uintptr_t retaddr);
1343void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1344                       TCGMemOpIdx oi, uintptr_t retaddr);
1345void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1346                       TCGMemOpIdx oi, uintptr_t retaddr);
1347void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1348                       TCGMemOpIdx oi, uintptr_t retaddr);
1349void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1350                       TCGMemOpIdx oi, uintptr_t retaddr);
1351void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1352                       TCGMemOpIdx oi, uintptr_t retaddr);
1353void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1354                       TCGMemOpIdx oi, uintptr_t retaddr);
1355
1356uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
1357                            TCGMemOpIdx oi, uintptr_t retaddr);
1358uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
1359                            TCGMemOpIdx oi, uintptr_t retaddr);
1360uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
1361                            TCGMemOpIdx oi, uintptr_t retaddr);
1362uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
1363                            TCGMemOpIdx oi, uintptr_t retaddr);
1364uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
1365                            TCGMemOpIdx oi, uintptr_t retaddr);
1366uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
1367                            TCGMemOpIdx oi, uintptr_t retaddr);
1368uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
1369                            TCGMemOpIdx oi, uintptr_t retaddr);
1370
1371/* Temporary aliases until backends are converted.  */
1372#ifdef TARGET_WORDS_BIGENDIAN
1373# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
1374# define helper_ret_lduw_mmu  helper_be_lduw_mmu
1375# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
1376# define helper_ret_ldul_mmu  helper_be_ldul_mmu
1377# define helper_ret_ldl_mmu   helper_be_ldul_mmu
1378# define helper_ret_ldq_mmu   helper_be_ldq_mmu
1379# define helper_ret_stw_mmu   helper_be_stw_mmu
1380# define helper_ret_stl_mmu   helper_be_stl_mmu
1381# define helper_ret_stq_mmu   helper_be_stq_mmu
1382# define helper_ret_ldw_cmmu  helper_be_ldw_cmmu
1383# define helper_ret_ldl_cmmu  helper_be_ldl_cmmu
1384# define helper_ret_ldq_cmmu  helper_be_ldq_cmmu
1385#else
1386# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
1387# define helper_ret_lduw_mmu  helper_le_lduw_mmu
1388# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
1389# define helper_ret_ldul_mmu  helper_le_ldul_mmu
1390# define helper_ret_ldl_mmu   helper_le_ldul_mmu
1391# define helper_ret_ldq_mmu   helper_le_ldq_mmu
1392# define helper_ret_stw_mmu   helper_le_stw_mmu
1393# define helper_ret_stl_mmu   helper_le_stl_mmu
1394# define helper_ret_stq_mmu   helper_le_stq_mmu
1395# define helper_ret_ldw_cmmu  helper_le_ldw_cmmu
1396# define helper_ret_ldl_cmmu  helper_le_ldl_cmmu
1397# define helper_ret_ldq_cmmu  helper_le_ldq_cmmu
1398#endif
1399
1400uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
1401                                    uint32_t cmpv, uint32_t newv,
1402                                    TCGMemOpIdx oi, uintptr_t retaddr);
1403uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
1404                                       uint32_t cmpv, uint32_t newv,
1405                                       TCGMemOpIdx oi, uintptr_t retaddr);
1406uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
1407                                       uint32_t cmpv, uint32_t newv,
1408                                       TCGMemOpIdx oi, uintptr_t retaddr);
1409uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
1410                                       uint64_t cmpv, uint64_t newv,
1411                                       TCGMemOpIdx oi, uintptr_t retaddr);
1412uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
1413                                       uint32_t cmpv, uint32_t newv,
1414                                       TCGMemOpIdx oi, uintptr_t retaddr);
1415uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
1416                                       uint32_t cmpv, uint32_t newv,
1417                                       TCGMemOpIdx oi, uintptr_t retaddr);
1418uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
1419                                       uint64_t cmpv, uint64_t newv,
1420                                       TCGMemOpIdx oi, uintptr_t retaddr);
1421
1422#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
1423TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu         \
1424    (CPUArchState *env, target_ulong addr, TYPE val,  \
1425     TCGMemOpIdx oi, uintptr_t retaddr);
1426
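/*
 * For illustration, GEN_ATOMIC_HELPER(fetch_add, uint32_t, l_le) expands to
 * the declaration (a sketch):
 *
 *     uint32_t helper_atomic_fetch_addl_le_mmu(CPUArchState *env,
 *                                              target_ulong addr, uint32_t val,
 *                                              TCGMemOpIdx oi, uintptr_t retaddr);
 */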
1427#ifdef CONFIG_ATOMIC64
1428#define GEN_ATOMIC_HELPER_ALL(NAME)          \
1429    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
1430    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
1431    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
1432    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
1433    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
1434    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
1435    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
1436#else
1437#define GEN_ATOMIC_HELPER_ALL(NAME)          \
1438    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
1439    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
1440    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
1441    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
1442    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
1443#endif
1444
1445GEN_ATOMIC_HELPER_ALL(fetch_add)
1446GEN_ATOMIC_HELPER_ALL(fetch_sub)
1447GEN_ATOMIC_HELPER_ALL(fetch_and)
1448GEN_ATOMIC_HELPER_ALL(fetch_or)
1449GEN_ATOMIC_HELPER_ALL(fetch_xor)
1450GEN_ATOMIC_HELPER_ALL(fetch_smin)
1451GEN_ATOMIC_HELPER_ALL(fetch_umin)
1452GEN_ATOMIC_HELPER_ALL(fetch_smax)
1453GEN_ATOMIC_HELPER_ALL(fetch_umax)
1454
1455GEN_ATOMIC_HELPER_ALL(add_fetch)
1456GEN_ATOMIC_HELPER_ALL(sub_fetch)
1457GEN_ATOMIC_HELPER_ALL(and_fetch)
1458GEN_ATOMIC_HELPER_ALL(or_fetch)
1459GEN_ATOMIC_HELPER_ALL(xor_fetch)
1460GEN_ATOMIC_HELPER_ALL(smin_fetch)
1461GEN_ATOMIC_HELPER_ALL(umin_fetch)
1462GEN_ATOMIC_HELPER_ALL(smax_fetch)
1463GEN_ATOMIC_HELPER_ALL(umax_fetch)
1464
1465GEN_ATOMIC_HELPER_ALL(xchg)
1466
1467#undef GEN_ATOMIC_HELPER_ALL
1468#undef GEN_ATOMIC_HELPER
1469#endif /* CONFIG_SOFTMMU */
1470
1471/*
 1472 * These aren't really "proper" helpers because TCG cannot manage Int128.
1473 * However, use the same format as the others, for use by the backends.
1474 *
1475 * The cmpxchg functions are only defined if HAVE_CMPXCHG128;
1476 * the ld/st functions are only defined if HAVE_ATOMIC128,
1477 * as defined by <qemu/atomic128.h>.
1478 */
1479Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
1480                                     Int128 cmpv, Int128 newv,
1481                                     TCGMemOpIdx oi, uintptr_t retaddr);
1482Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
1483                                     Int128 cmpv, Int128 newv,
1484                                     TCGMemOpIdx oi, uintptr_t retaddr);
1485
1486Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
1487                                TCGMemOpIdx oi, uintptr_t retaddr);
1488Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
1489                                TCGMemOpIdx oi, uintptr_t retaddr);
1490void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
1491                              TCGMemOpIdx oi, uintptr_t retaddr);
1492void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
1493                              TCGMemOpIdx oi, uintptr_t retaddr);
1494
1495#endif /* TCG_H */
1496