qemu/tcg/tcg.c
   1/*
   2 * Tiny Code Generator for QEMU
   3 *
   4 * Copyright (c) 2008 Fabrice Bellard
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a copy
   7 * of this software and associated documentation files (the "Software"), to deal
   8 * in the Software without restriction, including without limitation the rights
   9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10 * copies of the Software, and to permit persons to whom the Software is
  11 * furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22 * THE SOFTWARE.
  23 */
  24
  25/* define it to use liveness analysis (better code) */
  26#define USE_LIVENESS_ANALYSIS
  27#define USE_TCG_OPTIMIZATIONS
  28
  29#include "qemu/osdep.h"
  30
  31/* Define to dump the ELF file used to communicate with GDB.  */
  32#undef DEBUG_JIT
  33
  34#include "qemu/cutils.h"
  35#include "qemu/host-utils.h"
  36#include "qemu/timer.h"
  37
  38/* Note: the long-term plan is to reduce the dependencies on the QEMU
  39   CPU definitions. Currently they are used for qemu_ld/st
  40   instructions.  */
  41#define NO_CPU_IO_DEFS
  42#include "cpu.h"
  43
  44#include "tcg-op.h"
  45
  46#if UINTPTR_MAX == UINT32_MAX
  47# define ELF_CLASS  ELFCLASS32
  48#else
  49# define ELF_CLASS  ELFCLASS64
  50#endif
  51#ifdef HOST_WORDS_BIGENDIAN
  52# define ELF_DATA   ELFDATA2MSB
  53#else
  54# define ELF_DATA   ELFDATA2LSB
  55#endif
  56
  57#include "elf.h"
  58#include "exec/log.h"
  59
  60/* Forward declarations for functions declared in tcg-target.inc.c and
  61   used here. */
  62static void tcg_target_init(TCGContext *s);
  63static void tcg_target_qemu_prologue(TCGContext *s);
  64static void patch_reloc(tcg_insn_unit *code_ptr, int type,
  65                        intptr_t value, intptr_t addend);
  66
  67/* The CIE and FDE header definitions will be common to all hosts.  */
  68typedef struct {
  69    uint32_t len __attribute__((aligned((sizeof(void *)))));
  70    uint32_t id;
  71    uint8_t version;
  72    char augmentation[1];
  73    uint8_t code_align;
  74    uint8_t data_align;
  75    uint8_t return_column;
  76} DebugFrameCIE;
  77
  78typedef struct QEMU_PACKED {
  79    uint32_t len __attribute__((aligned((sizeof(void *)))));
  80    uint32_t cie_offset;
  81    uintptr_t func_start;
  82    uintptr_t func_len;
  83} DebugFrameFDEHeader;
  84
  85typedef struct QEMU_PACKED {
  86    DebugFrameCIE cie;
  87    DebugFrameFDEHeader fde;
  88} DebugFrameHeader;
  89
  90static void tcg_register_jit_int(void *buf, size_t size,
  91                                 const void *debug_frame,
  92                                 size_t debug_frame_size)
  93    __attribute__((unused));
  94
  95/* Forward declarations for functions declared and used in tcg-target.inc.c. */
  96static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
  97static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
  98                       intptr_t arg2);
  99static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
 100static void tcg_out_movi(TCGContext *s, TCGType type,
 101                         TCGReg ret, tcg_target_long arg);
 102static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
 103                       const int *const_args);
 104static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
 105                       intptr_t arg2);
 106static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
 107static int tcg_target_const_match(tcg_target_long val, TCGType type,
 108                                  const TCGArgConstraint *arg_ct);
 109static void tcg_out_tb_init(TCGContext *s);
 110static bool tcg_out_tb_finalize(TCGContext *s);
 111
 112
 113
 114static TCGRegSet tcg_target_available_regs[2];
 115static TCGRegSet tcg_target_call_clobber_regs;
 116
 117#if TCG_TARGET_INSN_UNIT_SIZE == 1
 118static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
 119{
 120    *s->code_ptr++ = v;
 121}
 122
 123static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
 124                                                      uint8_t v)
 125{
 126    *p = v;
 127}
 128#endif
 129
 130#if TCG_TARGET_INSN_UNIT_SIZE <= 2
 131static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
 132{
 133    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
 134        *s->code_ptr++ = v;
 135    } else {
 136        tcg_insn_unit *p = s->code_ptr;
 137        memcpy(p, &v, sizeof(v));
 138        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
 139    }
 140}
 141
 142static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
 143                                                       uint16_t v)
 144{
 145    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
 146        *p = v;
 147    } else {
 148        memcpy(p, &v, sizeof(v));
 149    }
 150}
 151#endif
 152
 153#if TCG_TARGET_INSN_UNIT_SIZE <= 4
 154static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
 155{
 156    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
 157        *s->code_ptr++ = v;
 158    } else {
 159        tcg_insn_unit *p = s->code_ptr;
 160        memcpy(p, &v, sizeof(v));
 161        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
 162    }
 163}
 164
 165static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
 166                                                       uint32_t v)
 167{
 168    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
 169        *p = v;
 170    } else {
 171        memcpy(p, &v, sizeof(v));
 172    }
 173}
 174#endif
 175
 176#if TCG_TARGET_INSN_UNIT_SIZE <= 8
 177static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
 178{
 179    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
 180        *s->code_ptr++ = v;
 181    } else {
 182        tcg_insn_unit *p = s->code_ptr;
 183        memcpy(p, &v, sizeof(v));
 184        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
 185    }
 186}
 187
 188static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
 189                                                       uint64_t v)
 190{
 191    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
 192        *p = v;
 193    } else {
 194        memcpy(p, &v, sizeof(v));
 195    }
 196}
 197#endif
 198
 199/* label relocation processing */
 200
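    /* Record a relocation of 'type' at 'code_ptr' against label 'l', applying
       it immediately when the label already has a value.  */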
 201static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
 202                          TCGLabel *l, intptr_t addend)
 203{
 204    TCGRelocation *r;
 205
 206    if (l->has_value) {
 207        /* FIXME: This may break relocations on RISC targets that
 208           modify instruction fields in place.  The caller may not have 
 209           written the initial value.  */
 210        patch_reloc(code_ptr, type, l->u.value, addend);
 211    } else {
 212        /* add a new relocation entry */
 213        r = tcg_malloc(sizeof(TCGRelocation));
 214        r->type = type;
 215        r->ptr = code_ptr;
 216        r->addend = addend;
 217        r->next = l->u.first_reloc;
 218        l->u.first_reloc = r;
 219    }
 220}
 221
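    /* Bind label 'l' to code position 'ptr' and patch every relocation
       queued against it.  */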
 222static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
 223{
 224    intptr_t value = (intptr_t)ptr;
 225    TCGRelocation *r;
 226
 227    tcg_debug_assert(!l->has_value);
 228
 229    for (r = l->u.first_reloc; r != NULL; r = r->next) {
 230        patch_reloc(r->ptr, r->type, value, r->addend);
 231    }
 232
 233    l->has_value = 1;
 234    l->u.value_ptr = ptr;
 235}
 236
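    /* Allocate a new, unbound label; it acquires a value later via tcg_out_label().  */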
 237TCGLabel *gen_new_label(void)
 238{
 239    TCGContext *s = &tcg_ctx;
 240    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));
 241
 242    *l = (TCGLabel){
 243        .id = s->nb_labels++
 244    };
 245
 246    return l;
 247}
 248
 249#include "tcg-target.inc.c"
 250
 251/* pool based memory allocation */
 252void *tcg_malloc_internal(TCGContext *s, int size)
 253{
 254    TCGPool *p;
 255    int pool_size;
 256    
 257    if (size > TCG_POOL_CHUNK_SIZE) {
 258        /* big malloc: insert a new pool (XXX: could optimize) */
 259        p = g_malloc(sizeof(TCGPool) + size);
 260        p->size = size;
 261        p->next = s->pool_first_large;
 262        s->pool_first_large = p;
 263        return p->data;
 264    } else {
 265        p = s->pool_current;
 266        if (!p) {
 267            p = s->pool_first;
 268            if (!p)
 269                goto new_pool;
 270        } else {
 271            if (!p->next) {
 272            new_pool:
 273                pool_size = TCG_POOL_CHUNK_SIZE;
 274                p = g_malloc(sizeof(TCGPool) + pool_size);
 275                p->size = pool_size;
 276                p->next = NULL;
 277                if (s->pool_current) 
 278                    s->pool_current->next = p;
 279                else
 280                    s->pool_first = p;
 281            } else {
 282                p = p->next;
 283            }
 284        }
 285    }
 286    s->pool_current = p;
 287    s->pool_cur = p->data + size;
 288    s->pool_end = p->data + p->size;
 289    return p->data;
 290}
 291
 292void tcg_pool_reset(TCGContext *s)
 293{
 294    TCGPool *p, *t;
 295    for (p = s->pool_first_large; p; p = t) {
 296        t = p->next;
 297        g_free(p);
 298    }
 299    s->pool_first_large = NULL;
 300    s->pool_cur = s->pool_end = NULL;
 301    s->pool_current = NULL;
 302}
 303
 304typedef struct TCGHelperInfo {
 305    void *func;
 306    const char *name;
 307    unsigned flags;
 308    unsigned sizemask;
 309} TCGHelperInfo;
 310
 311#include "exec/helper-proto.h"
 312
 313static const TCGHelperInfo all_helpers[] = {
 314#include "exec/helper-tcg.h"
 315};
 316
 317static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
 318
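    /* One-time initialization of the TCG context: allocate per-opcode constraint
       storage, build the helper lookup table, and run the backend's tcg_target_init().  */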
 319void tcg_context_init(TCGContext *s)
 320{
 321    int op, total_args, n, i;
 322    TCGOpDef *def;
 323    TCGArgConstraint *args_ct;
 324    int *sorted_args;
 325    GHashTable *helper_table;
 326
 327    memset(s, 0, sizeof(*s));
 328    s->nb_globals = 0;
 329    
 330    /* Count total number of arguments and allocate the corresponding
 331       space */
 332    total_args = 0;
 333    for(op = 0; op < NB_OPS; op++) {
 334        def = &tcg_op_defs[op];
 335        n = def->nb_iargs + def->nb_oargs;
 336        total_args += n;
 337    }
 338
 339    args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
 340    sorted_args = g_malloc(sizeof(int) * total_args);
 341
 342    for(op = 0; op < NB_OPS; op++) {
 343        def = &tcg_op_defs[op];
 344        def->args_ct = args_ct;
 345        def->sorted_args = sorted_args;
 346        n = def->nb_iargs + def->nb_oargs;
 347        sorted_args += n;
 348        args_ct += n;
 349    }
 350
 351    /* Register helpers.  */
 352    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
 353    s->helpers = helper_table = g_hash_table_new(NULL, NULL);
 354
 355    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
 356        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
 357                            (gpointer)&all_helpers[i]);
 358    }
 359
 360    tcg_target_init(s);
 361
 362    /* Reverse the order of the saved registers, assuming they're all at
 363       the start of tcg_target_reg_alloc_order.  */
 364    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
 365        int r = tcg_target_reg_alloc_order[n];
 366        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
 367            break;
 368        }
 369    }
 370    for (i = 0; i < n; ++i) {
 371        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
 372    }
 373    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
 374        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
 375    }
 376}
 377
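    /* Emit the host prologue at the start of code_gen_buffer, then shrink the
       buffer so translated code is generated after it.  */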
 378void tcg_prologue_init(TCGContext *s)
 379{
 380    size_t prologue_size, total_size;
 381    void *buf0, *buf1;
 382
 383    /* Put the prologue at the beginning of code_gen_buffer.  */
 384    buf0 = s->code_gen_buffer;
 385    s->code_ptr = buf0;
 386    s->code_buf = buf0;
 387    s->code_gen_prologue = buf0;
 388
 389    /* Generate the prologue.  */
 390    tcg_target_qemu_prologue(s);
 391    buf1 = s->code_ptr;
 392    flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);
 393
 394    /* Deduct the prologue from the buffer.  */
 395    prologue_size = tcg_current_code_size(s);
 396    s->code_gen_ptr = buf1;
 397    s->code_gen_buffer = buf1;
 398    s->code_buf = buf1;
 399    total_size = s->code_gen_buffer_size - prologue_size;
 400    s->code_gen_buffer_size = total_size;
 401
 402    /* Compute a high-water mark, at which we voluntarily flush the buffer
 403       and start over.  The size here is arbitrary, significantly larger
 404       than we expect the code generation for any one opcode to require.  */
 405    s->code_gen_highwater = s->code_gen_buffer + (total_size - 1024);
 406
 407    tcg_register_jit(s->code_gen_buffer, total_size);
 408
 409#ifdef DEBUG_DISAS
 410    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
 411        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
 412        log_disas(buf0, prologue_size);
 413        qemu_log("\n");
 414        qemu_log_flush();
 415    }
 416#endif
 417}
 418
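    /* Reset per-translation-block state before generating ops for a new TB.  */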
 419void tcg_func_start(TCGContext *s)
 420{
 421    tcg_pool_reset(s);
 422    s->nb_temps = s->nb_globals;
 423
 424    /* No temps have been previously allocated for size or locality.  */
 425    memset(s->free_temps, 0, sizeof(s->free_temps));
 426
 427    s->nb_labels = 0;
 428    s->current_frame_offset = s->frame_start;
 429
 430#ifdef CONFIG_DEBUG_TCG
 431    s->goto_tb_issue_mask = 0;
 432#endif
 433
 434    s->gen_first_op_idx = 0;
 435    s->gen_last_op_idx = -1;
 436    s->gen_next_op_idx = 0;
 437    s->gen_next_parm_idx = 0;
 438
 439    s->be = tcg_malloc(sizeof(TCGBackendData));
 440}
 441
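    /* Return the index of 'ts' within the context's temps[] array.  */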
 442static inline int temp_idx(TCGContext *s, TCGTemp *ts)
 443{
 444    ptrdiff_t n = ts - s->temps;
 445    tcg_debug_assert(n >= 0 && n < s->nb_temps);
 446    return n;
 447}
 448
 449static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
 450{
 451    int n = s->nb_temps++;
 452    tcg_debug_assert(n < TCG_MAX_TEMPS);
 453    return memset(&s->temps[n], 0, sizeof(TCGTemp));
 454}
 455
 456static inline TCGTemp *tcg_global_alloc(TCGContext *s)
 457{
 458    tcg_debug_assert(s->nb_globals == s->nb_temps);
 459    s->nb_globals++;
 460    return tcg_temp_alloc(s);
 461}
 462
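    /* Create a global temporary that permanently lives in the fixed host
       register 'reg'; the register is added to reserved_regs.  */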
 463static int tcg_global_reg_new_internal(TCGContext *s, TCGType type,
 464                                       TCGReg reg, const char *name)
 465{
 466    TCGTemp *ts;
 467
 468    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
 469        tcg_abort();
 470    }
 471
 472    ts = tcg_global_alloc(s);
 473    ts->base_type = type;
 474    ts->type = type;
 475    ts->fixed_reg = 1;
 476    ts->reg = reg;
 477    ts->name = name;
 478    tcg_regset_set_reg(s->reserved_regs, reg);
 479
 480    return temp_idx(s, ts);
 481}
 482
 483void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
 484{
 485    int idx;
 486    s->frame_start = start;
 487    s->frame_end = start + size;
 488    idx = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
 489    s->frame_temp = &s->temps[idx];
 490}
 491
 492TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name)
 493{
 494    TCGContext *s = &tcg_ctx;
 495    int idx;
 496
 497    if (tcg_regset_test_reg(s->reserved_regs, reg)) {
 498        tcg_abort();
 499    }
 500    idx = tcg_global_reg_new_internal(s, TCG_TYPE_I32, reg, name);
 501    return MAKE_TCGV_I32(idx);
 502}
 503
 504TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name)
 505{
 506    TCGContext *s = &tcg_ctx;
 507    int idx;
 508
 509    if (tcg_regset_test_reg(s->reserved_regs, reg)) {
 510        tcg_abort();
 511    }
 512    idx = tcg_global_reg_new_internal(s, TCG_TYPE_I64, reg, name);
 513    return MAKE_TCGV_I64(idx);
 514}
 515
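    /* Create a global temporary backed by memory at base + offset.  On 32-bit
       hosts a 64-bit global is split into two consecutive 32-bit halves.  */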
 516int tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
 517                                intptr_t offset, const char *name)
 518{
 519    TCGContext *s = &tcg_ctx;
 520    TCGTemp *base_ts = &s->temps[GET_TCGV_PTR(base)];
 521    TCGTemp *ts = tcg_global_alloc(s);
 522    int indirect_reg = 0, bigendian = 0;
 523#ifdef HOST_WORDS_BIGENDIAN
 524    bigendian = 1;
 525#endif
 526
 527    if (!base_ts->fixed_reg) {
 528        indirect_reg = 1;
 529        base_ts->indirect_base = 1;
 530    }
 531
 532    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
 533        TCGTemp *ts2 = tcg_global_alloc(s);
 534        char buf[64];
 535
 536        ts->base_type = TCG_TYPE_I64;
 537        ts->type = TCG_TYPE_I32;
 538        ts->indirect_reg = indirect_reg;
 539        ts->mem_allocated = 1;
 540        ts->mem_base = base_ts;
 541        ts->mem_offset = offset + bigendian * 4;
 542        pstrcpy(buf, sizeof(buf), name);
 543        pstrcat(buf, sizeof(buf), "_0");
 544        ts->name = strdup(buf);
 545
 546        tcg_debug_assert(ts2 == ts + 1);
 547        ts2->base_type = TCG_TYPE_I64;
 548        ts2->type = TCG_TYPE_I32;
 549        ts2->indirect_reg = indirect_reg;
 550        ts2->mem_allocated = 1;
 551        ts2->mem_base = base_ts;
 552        ts2->mem_offset = offset + (1 - bigendian) * 4;
 553        pstrcpy(buf, sizeof(buf), name);
 554        pstrcat(buf, sizeof(buf), "_1");
 555        ts2->name = strdup(buf);
 556    } else {
 557        ts->base_type = type;
 558        ts->type = type;
 559        ts->indirect_reg = indirect_reg;
 560        ts->mem_allocated = 1;
 561        ts->mem_base = base_ts;
 562        ts->mem_offset = offset;
 563        ts->name = name;
 564    }
 565    return temp_idx(s, ts);
 566}
 567
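    /* Allocate a temporary (local if 'temp_local'), reusing a previously freed
       temp of the same kind when one is available.  */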
 568static int tcg_temp_new_internal(TCGType type, int temp_local)
 569{
 570    TCGContext *s = &tcg_ctx;
 571    TCGTemp *ts;
 572    int idx, k;
 573
 574    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
 575    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
 576    if (idx < TCG_MAX_TEMPS) {
 577        /* There is already an available temp with the right type.  */
 578        clear_bit(idx, s->free_temps[k].l);
 579
 580        ts = &s->temps[idx];
 581        ts->temp_allocated = 1;
 582        tcg_debug_assert(ts->base_type == type);
 583        tcg_debug_assert(ts->temp_local == temp_local);
 584    } else {
 585        ts = tcg_temp_alloc(s);
 586        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
 587            TCGTemp *ts2 = tcg_temp_alloc(s);
 588
 589            ts->base_type = type;
 590            ts->type = TCG_TYPE_I32;
 591            ts->temp_allocated = 1;
 592            ts->temp_local = temp_local;
 593
 594            tcg_debug_assert(ts2 == ts + 1);
 595            ts2->base_type = TCG_TYPE_I64;
 596            ts2->type = TCG_TYPE_I32;
 597            ts2->temp_allocated = 1;
 598            ts2->temp_local = temp_local;
 599        } else {
 600            ts->base_type = type;
 601            ts->type = type;
 602            ts->temp_allocated = 1;
 603            ts->temp_local = temp_local;
 604        }
 605        idx = temp_idx(s, ts);
 606    }
 607
 608#if defined(CONFIG_DEBUG_TCG)
 609    s->temps_in_use++;
 610#endif
 611    return idx;
 612}
 613
 614TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
 615{
 616    int idx;
 617
 618    idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
 619    return MAKE_TCGV_I32(idx);
 620}
 621
 622TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
 623{
 624    int idx;
 625
 626    idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
 627    return MAKE_TCGV_I64(idx);
 628}
 629
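    /* Mark a temporary as free so a later allocation of the same kind can reuse it.  */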
 630static void tcg_temp_free_internal(int idx)
 631{
 632    TCGContext *s = &tcg_ctx;
 633    TCGTemp *ts;
 634    int k;
 635
 636#if defined(CONFIG_DEBUG_TCG)
 637    s->temps_in_use--;
 638    if (s->temps_in_use < 0) {
 639        fprintf(stderr, "More temporaries freed than allocated!\n");
 640    }
 641#endif
 642
 643    tcg_debug_assert(idx >= s->nb_globals && idx < s->nb_temps);
 644    ts = &s->temps[idx];
 645    tcg_debug_assert(ts->temp_allocated != 0);
 646    ts->temp_allocated = 0;
 647
 648    k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
 649    set_bit(idx, s->free_temps[k].l);
 650}
 651
 652void tcg_temp_free_i32(TCGv_i32 arg)
 653{
 654    tcg_temp_free_internal(GET_TCGV_I32(arg));
 655}
 656
 657void tcg_temp_free_i64(TCGv_i64 arg)
 658{
 659    tcg_temp_free_internal(GET_TCGV_I64(arg));
 660}
 661
 662TCGv_i32 tcg_const_i32(int32_t val)
 663{
 664    TCGv_i32 t0;
 665    t0 = tcg_temp_new_i32();
 666    tcg_gen_movi_i32(t0, val);
 667    return t0;
 668}
 669
 670TCGv_i64 tcg_const_i64(int64_t val)
 671{
 672    TCGv_i64 t0;
 673    t0 = tcg_temp_new_i64();
 674    tcg_gen_movi_i64(t0, val);
 675    return t0;
 676}
 677
 678TCGv_i32 tcg_const_local_i32(int32_t val)
 679{
 680    TCGv_i32 t0;
 681    t0 = tcg_temp_local_new_i32();
 682    tcg_gen_movi_i32(t0, val);
 683    return t0;
 684}
 685
 686TCGv_i64 tcg_const_local_i64(int64_t val)
 687{
 688    TCGv_i64 t0;
 689    t0 = tcg_temp_local_new_i64();
 690    tcg_gen_movi_i64(t0, val);
 691    return t0;
 692}
 693
 694#if defined(CONFIG_DEBUG_TCG)
 695void tcg_clear_temp_count(void)
 696{
 697    TCGContext *s = &tcg_ctx;
 698    s->temps_in_use = 0;
 699}
 700
 701int tcg_check_temp_count(void)
 702{
 703    TCGContext *s = &tcg_ctx;
 704    if (s->temps_in_use) {
 705        /* Clear the count so that we don't give another
 706         * warning immediately next time around.
 707         */
 708        s->temps_in_use = 0;
 709        return 1;
 710    }
 711    return 0;
 712}
 713#endif
 714
 715/* Note: we convert the 64 bit args to 32 bit and do some alignment
 716   and endian swap. Maybe it would be better to do the alignment
 717   and endian swap in tcg_reg_alloc_call(). */
 718void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
 719                   int nargs, TCGArg *args)
 720{
 721    int i, real_args, nb_rets, pi, pi_first;
 722    unsigned sizemask, flags;
 723    TCGHelperInfo *info;
 724
 725    info = g_hash_table_lookup(s->helpers, (gpointer)func);
 726    flags = info->flags;
 727    sizemask = info->sizemask;
 728
 729#if defined(__sparc__) && !defined(__arch64__) \
 730    && !defined(CONFIG_TCG_INTERPRETER)
 731    /* We have 64-bit values in one register, but need to pass as two
 732       separate parameters.  Split them.  */
 733    int orig_sizemask = sizemask;
 734    int orig_nargs = nargs;
 735    TCGv_i64 retl, reth;
 736
 737    TCGV_UNUSED_I64(retl);
 738    TCGV_UNUSED_I64(reth);
 739    if (sizemask != 0) {
 740        TCGArg *split_args = __builtin_alloca(sizeof(TCGArg) * nargs * 2);
 741        for (i = real_args = 0; i < nargs; ++i) {
 742            int is_64bit = sizemask & (1 << (i+1)*2);
 743            if (is_64bit) {
 744                TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
 745                TCGv_i32 h = tcg_temp_new_i32();
 746                TCGv_i32 l = tcg_temp_new_i32();
 747                tcg_gen_extr_i64_i32(l, h, orig);
 748                split_args[real_args++] = GET_TCGV_I32(h);
 749                split_args[real_args++] = GET_TCGV_I32(l);
 750            } else {
 751                split_args[real_args++] = args[i];
 752            }
 753        }
 754        nargs = real_args;
 755        args = split_args;
 756        sizemask = 0;
 757    }
 758#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
 759    for (i = 0; i < nargs; ++i) {
 760        int is_64bit = sizemask & (1 << (i+1)*2);
 761        int is_signed = sizemask & (2 << (i+1)*2);
 762        if (!is_64bit) {
 763            TCGv_i64 temp = tcg_temp_new_i64();
 764            TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
 765            if (is_signed) {
 766                tcg_gen_ext32s_i64(temp, orig);
 767            } else {
 768                tcg_gen_ext32u_i64(temp, orig);
 769            }
 770            args[i] = GET_TCGV_I64(temp);
 771        }
 772    }
 773#endif /* TCG_TARGET_EXTEND_ARGS */
 774
 775    pi_first = pi = s->gen_next_parm_idx;
 776    if (ret != TCG_CALL_DUMMY_ARG) {
 777#if defined(__sparc__) && !defined(__arch64__) \
 778    && !defined(CONFIG_TCG_INTERPRETER)
 779        if (orig_sizemask & 1) {
 780            /* The 32-bit ABI is going to return the 64-bit value in
 781               the %o0/%o1 register pair.  Prepare for this by using
 782               two return temporaries, and reassemble below.  */
 783            retl = tcg_temp_new_i64();
 784            reth = tcg_temp_new_i64();
 785            s->gen_opparam_buf[pi++] = GET_TCGV_I64(reth);
 786            s->gen_opparam_buf[pi++] = GET_TCGV_I64(retl);
 787            nb_rets = 2;
 788        } else {
 789            s->gen_opparam_buf[pi++] = ret;
 790            nb_rets = 1;
 791        }
 792#else
 793        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
 794#ifdef HOST_WORDS_BIGENDIAN
 795            s->gen_opparam_buf[pi++] = ret + 1;
 796            s->gen_opparam_buf[pi++] = ret;
 797#else
 798            s->gen_opparam_buf[pi++] = ret;
 799            s->gen_opparam_buf[pi++] = ret + 1;
 800#endif
 801            nb_rets = 2;
 802        } else {
 803            s->gen_opparam_buf[pi++] = ret;
 804            nb_rets = 1;
 805        }
 806#endif
 807    } else {
 808        nb_rets = 0;
 809    }
 810    real_args = 0;
 811    for (i = 0; i < nargs; i++) {
 812        int is_64bit = sizemask & (1 << (i+1)*2);
 813        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
 814#ifdef TCG_TARGET_CALL_ALIGN_ARGS
 815            /* some targets want aligned 64 bit args */
 816            if (real_args & 1) {
 817                s->gen_opparam_buf[pi++] = TCG_CALL_DUMMY_ARG;
 818                real_args++;
 819            }
 820#endif
 821            /* If stack grows up, then we will be placing successive
 822               arguments at lower addresses, which means we need to
 823               reverse the order compared to how we would normally
 824               treat either big or little-endian.  For those arguments
 825               that will wind up in registers, this still works for
 826               HPPA (the only current STACK_GROWSUP target) since the
 827               argument registers are *also* allocated in decreasing
 828               order.  If another such target is added, this logic may
 829               have to get more complicated to differentiate between
 830               stack arguments and register arguments.  */
 831#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
 832            s->gen_opparam_buf[pi++] = args[i] + 1;
 833            s->gen_opparam_buf[pi++] = args[i];
 834#else
 835            s->gen_opparam_buf[pi++] = args[i];
 836            s->gen_opparam_buf[pi++] = args[i] + 1;
 837#endif
 838            real_args += 2;
 839            continue;
 840        }
 841
 842        s->gen_opparam_buf[pi++] = args[i];
 843        real_args++;
 844    }
 845    s->gen_opparam_buf[pi++] = (uintptr_t)func;
 846    s->gen_opparam_buf[pi++] = flags;
 847
 848    i = s->gen_next_op_idx;
 849    tcg_debug_assert(i < OPC_BUF_SIZE);
 850    tcg_debug_assert(pi <= OPPARAM_BUF_SIZE);
 851
 852    /* Set links for sequential allocation during translation.  */
 853    s->gen_op_buf[i] = (TCGOp){
 854        .opc = INDEX_op_call,
 855        .callo = nb_rets,
 856        .calli = real_args,
 857        .args = pi_first,
 858        .prev = i - 1,
 859        .next = i + 1
 860    };
 861
 862    /* Make sure the calli field didn't overflow.  */
 863    tcg_debug_assert(s->gen_op_buf[i].calli == real_args);
 864
 865    s->gen_last_op_idx = i;
 866    s->gen_next_op_idx = i + 1;
 867    s->gen_next_parm_idx = pi;
 868
 869#if defined(__sparc__) && !defined(__arch64__) \
 870    && !defined(CONFIG_TCG_INTERPRETER)
 871    /* Free all of the parts we allocated above.  */
 872    for (i = real_args = 0; i < orig_nargs; ++i) {
 873        int is_64bit = orig_sizemask & (1 << (i+1)*2);
 874        if (is_64bit) {
 875            TCGv_i32 h = MAKE_TCGV_I32(args[real_args++]);
 876            TCGv_i32 l = MAKE_TCGV_I32(args[real_args++]);
 877            tcg_temp_free_i32(h);
 878            tcg_temp_free_i32(l);
 879        } else {
 880            real_args++;
 881        }
 882    }
 883    if (orig_sizemask & 1) {
 884        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
 885           Note that describing these as TCGv_i64 eliminates an unnecessary
 886           zero-extension that tcg_gen_concat_i32_i64 would create.  */
 887        tcg_gen_concat32_i64(MAKE_TCGV_I64(ret), retl, reth);
 888        tcg_temp_free_i64(retl);
 889        tcg_temp_free_i64(reth);
 890    }
 891#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
 892    for (i = 0; i < nargs; ++i) {
 893        int is_64bit = sizemask & (1 << (i+1)*2);
 894        if (!is_64bit) {
 895            TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
 896            tcg_temp_free_i64(temp);
 897        }
 898    }
 899#endif /* TCG_TARGET_EXTEND_ARGS */
 900}
 901
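    /* Reset the value state of every temporary and clear the register-to-temp
       map before register allocation of a translation block begins.  */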
 902static void tcg_reg_alloc_start(TCGContext *s)
 903{
 904    int i;
 905    TCGTemp *ts;
 906    for(i = 0; i < s->nb_globals; i++) {
 907        ts = &s->temps[i];
 908        if (ts->fixed_reg) {
 909            ts->val_type = TEMP_VAL_REG;
 910        } else {
 911            ts->val_type = TEMP_VAL_MEM;
 912        }
 913    }
 914    for(i = s->nb_globals; i < s->nb_temps; i++) {
 915        ts = &s->temps[i];
 916        if (ts->temp_local) {
 917            ts->val_type = TEMP_VAL_MEM;
 918        } else {
 919            ts->val_type = TEMP_VAL_DEAD;
 920        }
 921        ts->mem_allocated = 0;
 922        ts->fixed_reg = 0;
 923    }
 924
 925    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
 926}
 927
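    /* Format a human-readable name for temporary 'ts' into 'buf'.  */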
 928static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
 929                                 TCGTemp *ts)
 930{
 931    int idx = temp_idx(s, ts);
 932
 933    if (idx < s->nb_globals) {
 934        pstrcpy(buf, buf_size, ts->name);
 935    } else if (ts->temp_local) {
 936        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
 937    } else {
 938        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
 939    }
 940    return buf;
 941}
 942
 943static char *tcg_get_arg_str_idx(TCGContext *s, char *buf,
 944                                 int buf_size, int idx)
 945{
 946    tcg_debug_assert(idx >= 0 && idx < s->nb_temps);
 947    return tcg_get_arg_str_ptr(s, buf, buf_size, &s->temps[idx]);
 948}
 949
 950/* Find helper name.  */
 951static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
 952{
 953    const char *ret = NULL;
 954    if (s->helpers) {
 955        TCGHelperInfo *info = g_hash_table_lookup(s->helpers, (gpointer)val);
 956        if (info) {
 957            ret = info->name;
 958        }
 959    }
 960    return ret;
 961}
 962
 963static const char * const cond_name[] =
 964{
 965    [TCG_COND_NEVER] = "never",
 966    [TCG_COND_ALWAYS] = "always",
 967    [TCG_COND_EQ] = "eq",
 968    [TCG_COND_NE] = "ne",
 969    [TCG_COND_LT] = "lt",
 970    [TCG_COND_GE] = "ge",
 971    [TCG_COND_LE] = "le",
 972    [TCG_COND_GT] = "gt",
 973    [TCG_COND_LTU] = "ltu",
 974    [TCG_COND_GEU] = "geu",
 975    [TCG_COND_LEU] = "leu",
 976    [TCG_COND_GTU] = "gtu"
 977};
 978
 979static const char * const ldst_name[] =
 980{
 981    [MO_UB]   = "ub",
 982    [MO_SB]   = "sb",
 983    [MO_LEUW] = "leuw",
 984    [MO_LESW] = "lesw",
 985    [MO_LEUL] = "leul",
 986    [MO_LESL] = "lesl",
 987    [MO_LEQ]  = "leq",
 988    [MO_BEUW] = "beuw",
 989    [MO_BESW] = "besw",
 990    [MO_BEUL] = "beul",
 991    [MO_BESL] = "besl",
 992    [MO_BEQ]  = "beq",
 993};
 994
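    /* Log the current opcode stream, one op per line, for debugging.  */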
 995void tcg_dump_ops(TCGContext *s)
 996{
 997    char buf[128];
 998    TCGOp *op;
 999    int oi;
1000
1001    for (oi = s->gen_first_op_idx; oi >= 0; oi = op->next) {
1002        int i, k, nb_oargs, nb_iargs, nb_cargs;
1003        const TCGOpDef *def;
1004        const TCGArg *args;
1005        TCGOpcode c;
1006
1007        op = &s->gen_op_buf[oi];
1008        c = op->opc;
1009        def = &tcg_op_defs[c];
1010        args = &s->gen_opparam_buf[op->args];
1011
1012        if (c == INDEX_op_insn_start) {
1013            qemu_log("%s ----", oi != s->gen_first_op_idx ? "\n" : "");
1014
1015            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
1016                target_ulong a;
1017#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
1018                a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
1019#else
1020                a = args[i];
1021#endif
1022                qemu_log(" " TARGET_FMT_lx, a);
1023            }
1024        } else if (c == INDEX_op_call) {
1025            /* variable number of arguments */
1026            nb_oargs = op->callo;
1027            nb_iargs = op->calli;
1028            nb_cargs = def->nb_cargs;
1029
1030            /* function name, flags, out args */
1031            qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
1032                     tcg_find_helper(s, args[nb_oargs + nb_iargs]),
1033                     args[nb_oargs + nb_iargs + 1], nb_oargs);
1034            for (i = 0; i < nb_oargs; i++) {
1035                qemu_log(",%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1036                                                   args[i]));
1037            }
1038            for (i = 0; i < nb_iargs; i++) {
1039                TCGArg arg = args[nb_oargs + i];
1040                const char *t = "<dummy>";
1041                if (arg != TCG_CALL_DUMMY_ARG) {
1042                    t = tcg_get_arg_str_idx(s, buf, sizeof(buf), arg);
1043                }
1044                qemu_log(",%s", t);
1045            }
1046        } else {
1047            qemu_log(" %s ", def->name);
1048
1049            nb_oargs = def->nb_oargs;
1050            nb_iargs = def->nb_iargs;
1051            nb_cargs = def->nb_cargs;
1052
1053            k = 0;
1054            for (i = 0; i < nb_oargs; i++) {
1055                if (k != 0) {
1056                    qemu_log(",");
1057                }
1058                qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1059                                                   args[k++]));
1060            }
1061            for (i = 0; i < nb_iargs; i++) {
1062                if (k != 0) {
1063                    qemu_log(",");
1064                }
1065                qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1066                                                   args[k++]));
1067            }
1068            switch (c) {
1069            case INDEX_op_brcond_i32:
1070            case INDEX_op_setcond_i32:
1071            case INDEX_op_movcond_i32:
1072            case INDEX_op_brcond2_i32:
1073            case INDEX_op_setcond2_i32:
1074            case INDEX_op_brcond_i64:
1075            case INDEX_op_setcond_i64:
1076            case INDEX_op_movcond_i64:
1077                if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) {
1078                    qemu_log(",%s", cond_name[args[k++]]);
1079                } else {
1080                    qemu_log(",$0x%" TCG_PRIlx, args[k++]);
1081                }
1082                i = 1;
1083                break;
1084            case INDEX_op_qemu_ld_i32:
1085            case INDEX_op_qemu_st_i32:
1086            case INDEX_op_qemu_ld_i64:
1087            case INDEX_op_qemu_st_i64:
1088                {
1089                    TCGMemOpIdx oi = args[k++];
1090                    TCGMemOp op = get_memop(oi);
1091                    unsigned ix = get_mmuidx(oi);
1092
1093                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
1094                        qemu_log(",$0x%x,%u", op, ix);
1095                    } else {
1096                        const char *s_al = "", *s_op;
1097                        if (op & MO_AMASK) {
1098                            if ((op & MO_AMASK) == MO_ALIGN) {
1099                                s_al = "al+";
1100                            } else {
1101                                s_al = "un+";
1102                            }
1103                        }
1104                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
1105                        qemu_log(",%s%s,%u", s_al, s_op, ix);
1106                    }
1107                    i = 1;
1108                }
1109                break;
1110            default:
1111                i = 0;
1112                break;
1113            }
1114            switch (c) {
1115            case INDEX_op_set_label:
1116            case INDEX_op_br:
1117            case INDEX_op_brcond_i32:
1118            case INDEX_op_brcond_i64:
1119            case INDEX_op_brcond2_i32:
1120                qemu_log("%s$L%d", k ? "," : "", arg_label(args[k])->id);
1121                i++, k++;
1122                break;
1123            default:
1124                break;
1125            }
1126            for (; i < nb_cargs; i++, k++) {
1127                qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", args[k]);
1128            }
1129        }
1130        qemu_log("\n");
1131    }
1132}
1133
1134/* we give more priority to constraints with fewer registers */
1135static int get_constraint_priority(const TCGOpDef *def, int k)
1136{
1137    const TCGArgConstraint *arg_ct;
1138
1139    int i, n;
1140    arg_ct = &def->args_ct[k];
1141    if (arg_ct->ct & TCG_CT_ALIAS) {
1142        /* an alias is equivalent to a single register */
1143        n = 1;
1144    } else {
1145        if (!(arg_ct->ct & TCG_CT_REG))
1146            return 0;
1147        n = 0;
1148        for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1149            if (tcg_regset_test_reg(arg_ct->u.regs, i))
1150                n++;
1151        }
1152    }
1153    return TCG_TARGET_NB_REGS - n + 1;
1154}
1155
1156/* sort from highest priority to lowest */
1157static void sort_constraints(TCGOpDef *def, int start, int n)
1158{
1159    int i, j, p1, p2, tmp;
1160
1161    for(i = 0; i < n; i++)
1162        def->sorted_args[start + i] = start + i;
1163    if (n <= 1)
1164        return;
1165    for(i = 0; i < n - 1; i++) {
1166        for(j = i + 1; j < n; j++) {
1167            p1 = get_constraint_priority(def, def->sorted_args[start + i]);
1168            p2 = get_constraint_priority(def, def->sorted_args[start + j]);
1169            if (p1 < p2) {
1170                tmp = def->sorted_args[start + i];
1171                def->sorted_args[start + i] = def->sorted_args[start + j];
1172                def->sorted_args[start + j] = tmp;
1173            }
1174        }
1175    }
1176}
1177
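    /* Import the backend's constraint strings into tcg_op_defs and pre-sort
       the constraints of each op by priority.  */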
1178void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
1179{
1180    TCGOpcode op;
1181    TCGOpDef *def;
1182    const char *ct_str;
1183    int i, nb_args;
1184
1185    for(;;) {
1186        if (tdefs->op == (TCGOpcode)-1)
1187            break;
1188        op = tdefs->op;
1189        tcg_debug_assert((unsigned)op < NB_OPS);
1190        def = &tcg_op_defs[op];
1191#if defined(CONFIG_DEBUG_TCG)
1192        /* Duplicate entry in op definitions? */
1193        tcg_debug_assert(!def->used);
1194        def->used = 1;
1195#endif
1196        nb_args = def->nb_iargs + def->nb_oargs;
1197        for(i = 0; i < nb_args; i++) {
1198            ct_str = tdefs->args_ct_str[i];
1199            /* Incomplete TCGTargetOpDef entry? */
1200            tcg_debug_assert(ct_str != NULL);
1201            tcg_regset_clear(def->args_ct[i].u.regs);
1202            def->args_ct[i].ct = 0;
1203            if (ct_str[0] >= '0' && ct_str[0] <= '9') {
1204                int oarg;
1205                oarg = ct_str[0] - '0';
1206                tcg_debug_assert(oarg < def->nb_oargs);
1207                tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
1208                /* TCG_CT_ALIAS is for the output arguments. The input
1209                   argument is tagged with TCG_CT_IALIAS. */
1210                def->args_ct[i] = def->args_ct[oarg];
1211                def->args_ct[oarg].ct = TCG_CT_ALIAS;
1212                def->args_ct[oarg].alias_index = i;
1213                def->args_ct[i].ct |= TCG_CT_IALIAS;
1214                def->args_ct[i].alias_index = oarg;
1215            } else {
1216                for(;;) {
1217                    if (*ct_str == '\0')
1218                        break;
1219                    switch(*ct_str) {
1220                    case 'i':
1221                        def->args_ct[i].ct |= TCG_CT_CONST;
1222                        ct_str++;
1223                        break;
1224                    default:
1225                        if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
1226                            fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
1227                                    ct_str, i, def->name);
1228                            exit(1);
1229                        }
1230                    }
1231                }
1232            }
1233        }
1234
1235        /* TCGTargetOpDef entry with too much information? */
1236        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
1237
1238        /* sort the constraints (XXX: this is just a heuristic) */
1239        sort_constraints(def, 0, def->nb_oargs);
1240        sort_constraints(def, def->nb_oargs, def->nb_iargs);
1241
1242#if 0
1243        {
1244            int i;
1245
1246            printf("%s: sorted=", def->name);
1247            for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
1248                printf(" %d", def->sorted_args[i]);
1249            printf("\n");
1250        }
1251#endif
1252        tdefs++;
1253    }
1254
1255#if defined(CONFIG_DEBUG_TCG)
1256    i = 0;
1257    for (op = 0; op < tcg_op_defs_max; op++) {
1258        const TCGOpDef *def = &tcg_op_defs[op];
1259        if (def->flags & TCG_OPF_NOT_PRESENT) {
1260            /* Wrong entry in op definitions? */
1261            if (def->used) {
1262                fprintf(stderr, "Invalid op definition for %s\n", def->name);
1263                i = 1;
1264            }
1265        } else {
1266            /* Missing entry in op definitions? */
1267            if (!def->used) {
1268                fprintf(stderr, "Missing op definition for %s\n", def->name);
1269                i = 1;
1270            }
1271        }
1272    }
1273    if (i == 1) {
1274        tcg_abort();
1275    }
1276#endif
1277}
1278
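    /* Unlink 'op' from the doubly linked op list and poison its contents.  */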
1279void tcg_op_remove(TCGContext *s, TCGOp *op)
1280{
1281    int next = op->next;
1282    int prev = op->prev;
1283
1284    if (next >= 0) {
1285        s->gen_op_buf[next].prev = prev;
1286    } else {
1287        s->gen_last_op_idx = prev;
1288    }
1289    if (prev >= 0) {
1290        s->gen_op_buf[prev].next = next;
1291    } else {
1292        s->gen_first_op_idx = next;
1293    }
1294
1295    memset(op, -1, sizeof(*op));
1296
1297#ifdef CONFIG_PROFILER
1298    s->del_op_count++;
1299#endif
1300}
1301
1302#ifdef USE_LIVENESS_ANALYSIS
1303/* liveness analysis: end of function: all temps are dead, and globals
1304   should be in memory. */
1305static inline void tcg_la_func_end(TCGContext *s, uint8_t *dead_temps,
1306                                   uint8_t *mem_temps)
1307{
1308    memset(dead_temps, 1, s->nb_temps);
1309    memset(mem_temps, 1, s->nb_globals);
1310    memset(mem_temps + s->nb_globals, 0, s->nb_temps - s->nb_globals);
1311}
1312
1313/* liveness analysis: end of basic block: all temps are dead, globals
1314   and local temps should be in memory. */
1315static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps,
1316                                 uint8_t *mem_temps)
1317{
1318    int i;
1319
1320    memset(dead_temps, 1, s->nb_temps);
1321    memset(mem_temps, 1, s->nb_globals);
1322    for(i = s->nb_globals; i < s->nb_temps; i++) {
1323        mem_temps[i] = s->temps[i].temp_local;
1324    }
1325}
1326
1327/* Liveness analysis: update the op_dead_args array to tell whether a
1328   given input argument is dead. Instructions updating dead
1329   temporaries are removed. */
1330static void tcg_liveness_analysis(TCGContext *s)
1331{
1332    uint8_t *dead_temps, *mem_temps;
1333    int oi, oi_prev, nb_ops;
1334
1335    nb_ops = s->gen_next_op_idx;
1336    s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1337    s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
1338    
1339    dead_temps = tcg_malloc(s->nb_temps);
1340    mem_temps = tcg_malloc(s->nb_temps);
1341    tcg_la_func_end(s, dead_temps, mem_temps);
1342
1343    for (oi = s->gen_last_op_idx; oi >= 0; oi = oi_prev) {
1344        int i, nb_iargs, nb_oargs;
1345        TCGOpcode opc_new, opc_new2;
1346        bool have_opc_new2;
1347        uint16_t dead_args;
1348        uint8_t sync_args;
1349        TCGArg arg;
1350
1351        TCGOp * const op = &s->gen_op_buf[oi];
1352        TCGArg * const args = &s->gen_opparam_buf[op->args];
1353        TCGOpcode opc = op->opc;
1354        const TCGOpDef *def = &tcg_op_defs[opc];
1355
1356        oi_prev = op->prev;
1357
1358        switch (opc) {
1359        case INDEX_op_call:
1360            {
1361                int call_flags;
1362
1363                nb_oargs = op->callo;
1364                nb_iargs = op->calli;
1365                call_flags = args[nb_oargs + nb_iargs + 1];
1366
1367                /* pure functions can be removed if their result is unused */
1368                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
1369                    for (i = 0; i < nb_oargs; i++) {
1370                        arg = args[i];
1371                        if (!dead_temps[arg] || mem_temps[arg]) {
1372                            goto do_not_remove_call;
1373                        }
1374                    }
1375                    goto do_remove;
1376                } else {
1377                do_not_remove_call:
1378
1379                    /* output args are dead */
1380                    dead_args = 0;
1381                    sync_args = 0;
1382                    for (i = 0; i < nb_oargs; i++) {
1383                        arg = args[i];
1384                        if (dead_temps[arg]) {
1385                            dead_args |= (1 << i);
1386                        }
1387                        if (mem_temps[arg]) {
1388                            sync_args |= (1 << i);
1389                        }
1390                        dead_temps[arg] = 1;
1391                        mem_temps[arg] = 0;
1392                    }
1393
1394                    if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
1395                        /* globals should be synced to memory */
1396                        memset(mem_temps, 1, s->nb_globals);
1397                    }
1398                    if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
1399                                        TCG_CALL_NO_READ_GLOBALS))) {
1400                        /* globals should go back to memory */
1401                        memset(dead_temps, 1, s->nb_globals);
1402                    }
1403
1404                    /* record arguments that die in this helper */
1405                    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1406                        arg = args[i];
1407                        if (arg != TCG_CALL_DUMMY_ARG) {
1408                            if (dead_temps[arg]) {
1409                                dead_args |= (1 << i);
1410                            }
1411                        }
1412                    }
1413                    /* input arguments are live for preceding opcodes */
1414                    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1415                        arg = args[i];
1416                        dead_temps[arg] = 0;
1417                    }
1418                    s->op_dead_args[oi] = dead_args;
1419                    s->op_sync_args[oi] = sync_args;
1420                }
1421            }
1422            break;
1423        case INDEX_op_insn_start:
1424            break;
1425        case INDEX_op_discard:
1426            /* mark the temporary as dead */
1427            dead_temps[args[0]] = 1;
1428            mem_temps[args[0]] = 0;
1429            break;
1430
1431        case INDEX_op_add2_i32:
1432            opc_new = INDEX_op_add_i32;
1433            goto do_addsub2;
1434        case INDEX_op_sub2_i32:
1435            opc_new = INDEX_op_sub_i32;
1436            goto do_addsub2;
1437        case INDEX_op_add2_i64:
1438            opc_new = INDEX_op_add_i64;
1439            goto do_addsub2;
1440        case INDEX_op_sub2_i64:
1441            opc_new = INDEX_op_sub_i64;
1442        do_addsub2:
1443            nb_iargs = 4;
1444            nb_oargs = 2;
1445            /* Test if the high part of the operation is dead, but not
1446               the low part.  The result can be optimized to a simple
1447               add or sub.  This happens often for an x86_64 guest when the
1448               cpu mode is set to 32-bit.  */
1449            if (dead_temps[args[1]] && !mem_temps[args[1]]) {
1450                if (dead_temps[args[0]] && !mem_temps[args[0]]) {
1451                    goto do_remove;
1452                }
1453                /* Replace the opcode and adjust the args in place,
1454                   leaving 3 unused args at the end.  */
1455                op->opc = opc = opc_new;
1456                args[1] = args[2];
1457                args[2] = args[4];
1458                /* Fall through and mark the single-word operation live.  */
1459                nb_iargs = 2;
1460                nb_oargs = 1;
1461            }
1462            goto do_not_remove;
1463
1464        case INDEX_op_mulu2_i32:
1465            opc_new = INDEX_op_mul_i32;
1466            opc_new2 = INDEX_op_muluh_i32;
1467            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
1468            goto do_mul2;
1469        case INDEX_op_muls2_i32:
1470            opc_new = INDEX_op_mul_i32;
1471            opc_new2 = INDEX_op_mulsh_i32;
1472            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
1473            goto do_mul2;
1474        case INDEX_op_mulu2_i64:
1475            opc_new = INDEX_op_mul_i64;
1476            opc_new2 = INDEX_op_muluh_i64;
1477            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
1478            goto do_mul2;
1479        case INDEX_op_muls2_i64:
1480            opc_new = INDEX_op_mul_i64;
1481            opc_new2 = INDEX_op_mulsh_i64;
1482            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
1483            goto do_mul2;
1484        do_mul2:
1485            nb_iargs = 2;
1486            nb_oargs = 2;
1487            if (dead_temps[args[1]] && !mem_temps[args[1]]) {
1488                if (dead_temps[args[0]] && !mem_temps[args[0]]) {
1489                    /* Both parts of the operation are dead.  */
1490                    goto do_remove;
1491                }
1492                /* The high part of the operation is dead; generate the low. */
1493                op->opc = opc = opc_new;
1494                args[1] = args[2];
1495                args[2] = args[3];
1496            } else if (have_opc_new2 && dead_temps[args[0]]
1497                       && !mem_temps[args[0]]) {
1498                /* The low part of the operation is dead; generate the high. */
1499                op->opc = opc = opc_new2;
1500                args[0] = args[1];
1501                args[1] = args[2];
1502                args[2] = args[3];
1503            } else {
1504                goto do_not_remove;
1505            }
1506            /* Mark the single-word operation live.  */
1507            nb_oargs = 1;
1508            goto do_not_remove;
1509
1510        default:
1511            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
1512            nb_iargs = def->nb_iargs;
1513            nb_oargs = def->nb_oargs;
1514
1515            /* Test if the operation can be removed because all
1516               its outputs are dead. We assume that nb_oargs == 0
1517               implies side effects */
1518            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
1519                for (i = 0; i < nb_oargs; i++) {
1520                    arg = args[i];
1521                    if (!dead_temps[arg] || mem_temps[arg]) {
1522                        goto do_not_remove;
1523                    }
1524                }
1525            do_remove:
1526                tcg_op_remove(s, op);
1527            } else {
1528            do_not_remove:
1529                /* output args are dead */
1530                dead_args = 0;
1531                sync_args = 0;
1532                for (i = 0; i < nb_oargs; i++) {
1533                    arg = args[i];
1534                    if (dead_temps[arg]) {
1535                        dead_args |= (1 << i);
1536                    }
1537                    if (mem_temps[arg]) {
1538                        sync_args |= (1 << i);
1539                    }
1540                    dead_temps[arg] = 1;
1541                    mem_temps[arg] = 0;
1542                }
1543
1544                /* if end of basic block, update */
1545                if (def->flags & TCG_OPF_BB_END) {
1546                    tcg_la_bb_end(s, dead_temps, mem_temps);
1547                } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
1548                    /* globals should be synced to memory */
1549                    memset(mem_temps, 1, s->nb_globals);
1550                }
1551
1552                /* record arguments that die in this opcode */
1553                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1554                    arg = args[i];
1555                    if (dead_temps[arg]) {
1556                        dead_args |= (1 << i);
1557                    }
1558                }
1559                /* input arguments are live for preceding opcodes */
1560                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1561                    arg = args[i];
1562                    dead_temps[arg] = 0;
1563                }
1564                s->op_dead_args[oi] = dead_args;
1565                s->op_sync_args[oi] = sync_args;
1566            }
1567            break;
1568        }
1569    }
1570}
1571#else
1572/* dummy liveness analysis */
1573static void tcg_liveness_analysis(TCGContext *s)
1574{
1575    int nb_ops = s->gen_next_op_idx;
1576
1577    s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1578    memset(s->op_dead_args, 0, nb_ops * sizeof(uint16_t));
1579    s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
1580    memset(s->op_sync_args, 0, nb_ops * sizeof(uint8_t));
1581}
1582#endif
1583
1584#ifdef CONFIG_DEBUG_TCG
1585static void dump_regs(TCGContext *s)
1586{
1587    TCGTemp *ts;
1588    int i;
1589    char buf[64];
1590
1591    for(i = 0; i < s->nb_temps; i++) {
1592        ts = &s->temps[i];
1593        printf("  %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
1594        switch(ts->val_type) {
1595        case TEMP_VAL_REG:
1596            printf("%s", tcg_target_reg_names[ts->reg]);
1597            break;
1598        case TEMP_VAL_MEM:
1599            printf("%d(%s)", (int)ts->mem_offset,
1600                   tcg_target_reg_names[ts->mem_base->reg]);
1601            break;
1602        case TEMP_VAL_CONST:
1603            printf("$0x%" TCG_PRIlx, ts->val);
1604            break;
1605        case TEMP_VAL_DEAD:
1606            printf("D");
1607            break;
1608        default:
1609            printf("???");
1610            break;
1611        }
1612        printf("\n");
1613    }
1614
1615    for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1616        if (s->reg_to_temp[i] != NULL) {
1617            printf("%s: %s\n", 
1618                   tcg_target_reg_names[i], 
1619                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
1620        }
1621    }
1622}
1623
1624static void check_regs(TCGContext *s)
1625{
1626    int reg;
1627    int k;
1628    TCGTemp *ts;
1629    char buf[64];
1630
1631    for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1632        ts = s->reg_to_temp[reg];
1633        if (ts != NULL) {
1634            if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
1635                printf("Inconsistency for register %s:\n", 
1636                       tcg_target_reg_names[reg]);
1637                goto fail;
1638            }
1639        }
1640    }
1641    for (k = 0; k < s->nb_temps; k++) {
1642        ts = &s->temps[k];
1643        if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
1644            && s->reg_to_temp[ts->reg] != ts) {
1645            printf("Inconsistency for temp %s:\n",
1646                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
1647        fail:
1648            printf("reg state:\n");
1649            dump_regs(s);
1650            tcg_abort();
1651        }
1652    }
1653}
1654#endif
1655
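/* Allocate a stack frame slot for a temporary.  On most hosts the frame
   offset is first rounded up to the natural alignment of tcg_target_long;
   e.g. with 8-byte words an offset of 12 becomes (12 + 7) & ~7 = 16
   before the slot is assigned.  */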
1656static void temp_allocate_frame(TCGContext *s, int temp)
1657{
1658    TCGTemp *ts;
1659    ts = &s->temps[temp];
1660#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
1661    /* Sparc64 stack is accessed with offset of 2047 */
1662    s->current_frame_offset = (s->current_frame_offset +
1663                               (tcg_target_long)sizeof(tcg_target_long) - 1) &
1664        ~(sizeof(tcg_target_long) - 1);
1665#endif
1666    if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
1667        s->frame_end) {
1668        tcg_abort();
1669    }
1670    ts->mem_offset = s->current_frame_offset;
1671    ts->mem_base = s->frame_temp;
1672    ts->mem_allocated = 1;
1673    s->current_frame_offset += sizeof(tcg_target_long);
1674}
1675
1676static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet);
1677
1678/* sync register 'reg' by saving it to the corresponding temporary */
1679static void tcg_reg_sync(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
1680{
1681    TCGTemp *ts = s->reg_to_temp[reg];
1682
1683    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
1684    if (!ts->mem_coherent && !ts->fixed_reg) {
1685        if (!ts->mem_allocated) {
1686            temp_allocate_frame(s, temp_idx(s, ts));
1687        } else if (ts->indirect_reg) {
1688            tcg_regset_set_reg(allocated_regs, ts->reg);
1689            temp_load(s, ts->mem_base,
1690                      tcg_target_available_regs[TCG_TYPE_PTR],
1691                      allocated_regs);
1692        }
1693        tcg_out_st(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
1694    }
1695    ts->mem_coherent = 1;
1696}
1697
1698/* free register 'reg' by spilling the corresponding temporary if necessary */
1699static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
1700{
1701    TCGTemp *ts = s->reg_to_temp[reg];
1702
1703    if (ts != NULL) {
1704        tcg_reg_sync(s, reg, allocated_regs);
1705        ts->val_type = TEMP_VAL_MEM;
1706        s->reg_to_temp[reg] = NULL;
1707    }
1708}
1709
1710/* Allocate a register belonging to desired_regs & ~allocated_regs. */
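/* Two passes over the preferred allocation order: first try to find a
   register that is currently free; failing that, spill one of the
   acceptable registers with tcg_reg_free() and hand it out.  */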
1711static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet desired_regs,
1712                            TCGRegSet allocated_regs, bool rev)
1713{
1714    int i, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
1715    const int *order;
1716    TCGReg reg;
1717    TCGRegSet reg_ct;
1718
1719    tcg_regset_andnot(reg_ct, desired_regs, allocated_regs);
1720    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
1721
1722    /* first try free registers */
1723    for(i = 0; i < n; i++) {
1724        reg = order[i];
1725        if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == NULL)
1726            return reg;
1727    }
1728
1729    /* XXX: do better spill choice */
1730    for(i = 0; i < n; i++) {
1731        reg = order[i];
1732        if (tcg_regset_test_reg(reg_ct, reg)) {
1733            tcg_reg_free(s, reg, allocated_regs);
1734            return reg;
1735        }
1736    }
1737
1738    tcg_abort();
1739}
1740
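/* A temporary can be in one of four states: TEMP_VAL_DEAD (no value),
   TEMP_VAL_MEM (only the canonical memory slot holds the value),
   TEMP_VAL_REG (a host register holds it; mem_coherent says whether the
   memory copy is still up to date) and TEMP_VAL_CONST (the value is a
   known constant that has not been materialized in a register yet).  */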
1741/* Make sure the temporary is in a register.  If needed, allocate the register
1742   from DESIRED while avoiding ALLOCATED.  */
1743static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
1744                      TCGRegSet allocated_regs)
1745{
1746    TCGReg reg;
1747
1748    switch (ts->val_type) {
1749    case TEMP_VAL_REG:
1750        return;
1751    case TEMP_VAL_CONST:
1752        reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
1753        tcg_out_movi(s, ts->type, reg, ts->val);
1754        ts->mem_coherent = 0;
1755        break;
1756    case TEMP_VAL_MEM:
1757        reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
1758        if (ts->indirect_reg) {
1759            tcg_regset_set_reg(allocated_regs, reg);
1760            temp_load(s, ts->mem_base,
1761                      tcg_target_available_regs[TCG_TYPE_PTR],
1762                      allocated_regs);
1763        }
1764        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
1765        ts->mem_coherent = 1;
1766        break;
1767    case TEMP_VAL_DEAD:
1768    default:
1769        tcg_abort();
1770    }
1771    ts->reg = reg;
1772    ts->val_type = TEMP_VAL_REG;
1773    s->reg_to_temp[reg] = ts;
1774}
1775
1776/* mark a temporary as dead. */
1777static inline void temp_dead(TCGContext *s, TCGTemp *ts)
1778{
1779    if (ts->fixed_reg) {
1780        return;
1781    }
1782    if (ts->val_type == TEMP_VAL_REG) {
1783        s->reg_to_temp[ts->reg] = NULL;
1784    }
1785    ts->val_type = (temp_idx(s, ts) < s->nb_globals || ts->temp_local
1786                    ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
1787}
1788
1789/* sync a temporary to memory. 'allocated_regs' is used in case a
1790   temporary register needs to be allocated to store a constant. */
1791static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
1792{
1793    if (ts->fixed_reg) {
1794        return;
1795    }
1796    switch (ts->val_type) {
1797    case TEMP_VAL_CONST:
1798        temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs);
1799        /* fallthrough */
1800    case TEMP_VAL_REG:
1801        tcg_reg_sync(s, ts->reg, allocated_regs);
1802        break;
1803    case TEMP_VAL_DEAD:
1804    case TEMP_VAL_MEM:
1805        break;
1806    default:
1807        tcg_abort();
1808    }
1809}
1810
1811/* save a temporary to memory. 'allocated_regs' is used in case a
1812   temporary register needs to be allocated to store a constant. */
1813static inline void temp_save(TCGContext *s, TCGTemp *ts,
1814                             TCGRegSet allocated_regs)
1815{
1816#ifdef USE_LIVENESS_ANALYSIS
1817    /* ??? Liveness does not yet incorporate indirect bases.  */
1818    if (!ts->indirect_base) {
1819        /* The liveness analysis already ensures that globals are back
1820           in memory. Keep a tcg_debug_assert for safety. */
1821        tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
1822        return;
1823    }
1824#endif
1825    temp_sync(s, ts, allocated_regs);
1826    temp_dead(s, ts);
1827}
1828
1829/* save globals to their canonical location and assume they can be
1830   modified by the following code. 'allocated_regs' is used in case a
1831   temporary register needs to be allocated to store a constant. */
1832static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
1833{
1834    int i;
1835
1836    for (i = 0; i < s->nb_globals; i++) {
1837        temp_save(s, &s->temps[i], allocated_regs);
1838    }
1839}
1840
1841/* sync globals to their canonical location and assume they can be
1842   read by the following code. 'allocated_regs' is used in case a
1843   temporary register needs to be allocated to store a constant. */
1844static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
1845{
1846    int i;
1847
1848    for (i = 0; i < s->nb_globals; i++) {
1849        TCGTemp *ts = &s->temps[i];
1850#ifdef USE_LIVENESS_ANALYSIS
1851        /* ??? Liveness does not yet incorporate indirect bases.  */
1852        if (!ts->indirect_base) {
1853            tcg_debug_assert(ts->val_type != TEMP_VAL_REG
1854                             || ts->fixed_reg
1855                             || ts->mem_coherent);
1856            continue;
1857        }
1858#endif
1859        temp_sync(s, ts, allocated_regs);
1860    }
1861}
1862
1863/* at the end of a basic block, we assume all temporaries are dead and
1864   all globals are stored at their canonical location. */
1865static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
1866{
1867    int i;
1868
1869    for (i = s->nb_globals; i < s->nb_temps; i++) {
1870        TCGTemp *ts = &s->temps[i];
1871        if (ts->temp_local) {
1872            temp_save(s, ts, allocated_regs);
1873        } else {
1874#ifdef USE_LIVENESS_ANALYSIS
1875            /* ??? Liveness does not yet incorporate indirect bases.  */
1876            if (!ts->indirect_base) {
1877                /* The liveness analysis already ensures that temps are dead.
1878                   Keep a tcg_debug_assert for safety. */
1879                tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
1880                continue;
1881            }
1882#endif
1883            temp_dead(s, ts);
1884        }
1885    }
1886
1887    save_globals(s, allocated_regs);
1888}
1889
1890#define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1)
1891#define NEED_SYNC_ARG(n) ((sync_args >> (n)) & 1)
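/* Both masks use the same bit layout as the opcode's argument list:
   bits [0, nb_oargs) refer to the outputs and the next nb_iargs bits to
   the inputs.  E.g. for an op with one output and two inputs,
   dead_args == 0x6 means both inputs die at this opcode.  */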
1892
1893static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
1894                               uint16_t dead_args, uint8_t sync_args)
1895{
1896    TCGTemp *ots;
1897    tcg_target_ulong val;
1898
1899    ots = &s->temps[args[0]];
1900    val = args[1];
1901
1902    if (ots->fixed_reg) {
1903        /* for fixed registers, we do not do any constant
1904           propagation */
1905        tcg_out_movi(s, ots->type, ots->reg, val);
1906    } else {
1907        /* The movi is not explicitly generated here */
1908        if (ots->val_type == TEMP_VAL_REG) {
1909            s->reg_to_temp[ots->reg] = NULL;
1910        }
1911        ots->val_type = TEMP_VAL_CONST;
1912        ots->val = val;
1913    }
1914    if (NEED_SYNC_ARG(0)) {
1915        temp_sync(s, ots, s->reserved_regs);
1916    }
1917    if (IS_DEAD_ARG(0)) {
1918        temp_dead(s, ots);
1919    }
1920}
1921
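/* Register allocation for a mov.  Whenever possible the move is elided:
   a constant source is propagated into the destination and a register
   source that dies here is simply taken over by the destination; a host
   move (or store, for a dead output that must be synced) is emitted only
   otherwise.  */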
1922static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
1923                              const TCGArg *args, uint16_t dead_args,
1924                              uint8_t sync_args)
1925{
1926    TCGRegSet allocated_regs;
1927    TCGTemp *ts, *ots;
1928    TCGType otype, itype;
1929
1930    tcg_regset_set(allocated_regs, s->reserved_regs);
1931    ots = &s->temps[args[0]];
1932    ts = &s->temps[args[1]];
1933
1934    /* Note that otype != itype for no-op truncation.  */
1935    otype = ots->type;
1936    itype = ts->type;
1937
1938    /* If the source value is not in a register, and we're going to be
1939       forced to have it in a register in order to perform the copy,
1940       then copy the SOURCE value into its own register first.  That way
1941       we don't have to reload SOURCE the next time it is used. */
1942    if (((NEED_SYNC_ARG(0) || ots->fixed_reg) && ts->val_type != TEMP_VAL_REG)
1943        || ts->val_type == TEMP_VAL_MEM) {
1944        temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs);
1945    }
1946
1947    if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
1948        /* mov to a non-saved dead register makes no sense (even with
1949           liveness analysis disabled). */
1950        tcg_debug_assert(NEED_SYNC_ARG(0));
1951        /* The code above should have moved the temp to a register. */
1952        tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
1953        if (!ots->mem_allocated) {
1954            temp_allocate_frame(s, args[0]);
1955        }
1956        if (ots->indirect_reg) {
1957            tcg_regset_set_reg(allocated_regs, ts->reg);
1958            temp_load(s, ots->mem_base,
1959                      tcg_target_available_regs[TCG_TYPE_PTR],
1960                      allocated_regs);
1961        }
1962        tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
1963        if (IS_DEAD_ARG(1)) {
1964            temp_dead(s, ts);
1965        }
1966        temp_dead(s, ots);
1967    } else if (ts->val_type == TEMP_VAL_CONST) {
1968        /* propagate constant */
1969        if (ots->val_type == TEMP_VAL_REG) {
1970            s->reg_to_temp[ots->reg] = NULL;
1971        }
1972        ots->val_type = TEMP_VAL_CONST;
1973        ots->val = ts->val;
1974        if (IS_DEAD_ARG(1)) {
1975            temp_dead(s, ts);
1976        }
1977    } else {
1978        /* The code in the first if block should have moved the
1979           temp to a register. */
1980        tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
1981        if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
1982            /* the mov can be suppressed */
1983            if (ots->val_type == TEMP_VAL_REG) {
1984                s->reg_to_temp[ots->reg] = NULL;
1985            }
1986            ots->reg = ts->reg;
1987            temp_dead(s, ts);
1988        } else {
1989            if (ots->val_type != TEMP_VAL_REG) {
1990                /* When allocating a new register, make sure to not spill the
1991                   input one. */
1992                tcg_regset_set_reg(allocated_regs, ts->reg);
1993                ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
1994                                         allocated_regs, ots->indirect_base);
1995            }
1996            tcg_out_mov(s, otype, ots->reg, ts->reg);
1997        }
1998        ots->val_type = TEMP_VAL_REG;
1999        ots->mem_coherent = 0;
2000        s->reg_to_temp[ots->reg] = ots;
2001        if (NEED_SYNC_ARG(0)) {
2002            tcg_reg_sync(s, ots->reg, allocated_regs);
2003        }
2004    }
2005}
2006
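/* Generic register allocation for one op.  args[] lists the outputs
   first, then the inputs, then the constant arguments; new_args[] and
   const_args[] are filled in the same order below before the opcode is
   finally emitted with tcg_out_op().  */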
2007static void tcg_reg_alloc_op(TCGContext *s, 
2008                             const TCGOpDef *def, TCGOpcode opc,
2009                             const TCGArg *args, uint16_t dead_args,
2010                             uint8_t sync_args)
2011{
2012    TCGRegSet allocated_regs;
2013    int i, k, nb_iargs, nb_oargs;
2014    TCGReg reg;
2015    TCGArg arg;
2016    const TCGArgConstraint *arg_ct;
2017    TCGTemp *ts;
2018    TCGArg new_args[TCG_MAX_OP_ARGS];
2019    int const_args[TCG_MAX_OP_ARGS];
2020
2021    nb_oargs = def->nb_oargs;
2022    nb_iargs = def->nb_iargs;
2023
2024    /* copy constants */
2025    memcpy(new_args + nb_oargs + nb_iargs, 
2026           args + nb_oargs + nb_iargs, 
2027           sizeof(TCGArg) * def->nb_cargs);
2028
2029    /* satisfy input constraints */ 
2030    tcg_regset_set(allocated_regs, s->reserved_regs);
2031    for(k = 0; k < nb_iargs; k++) {
2032        i = def->sorted_args[nb_oargs + k];
2033        arg = args[i];
2034        arg_ct = &def->args_ct[i];
2035        ts = &s->temps[arg];
2036
2037        if (ts->val_type == TEMP_VAL_CONST
2038            && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
2039            /* constant is OK for instruction */
2040            const_args[i] = 1;
2041            new_args[i] = ts->val;
2042            goto iarg_end;
2043        }
2044
2045        temp_load(s, ts, arg_ct->u.regs, allocated_regs);
2046
2047        if (arg_ct->ct & TCG_CT_IALIAS) {
2048            if (ts->fixed_reg) {
2049                /* if fixed register, we must allocate a new register
2050                   if the alias is not the same register */
2051                if (arg != args[arg_ct->alias_index])
2052                    goto allocate_in_reg;
2053            } else {
2054                /* if the input is aliased to an output and if it is
2055                   not dead after the instruction, we must allocate
2056                   a new register and move it */
2057                if (!IS_DEAD_ARG(i)) {
2058                    goto allocate_in_reg;
2059                }
2060                /* check if the current register has already been allocated
2061                   for another input aliased to an output */
2062                int k2, i2;
2063                for (k2 = 0 ; k2 < k ; k2++) {
2064                    i2 = def->sorted_args[nb_oargs + k2];
2065                    if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
2066                        (new_args[i2] == ts->reg)) {
2067                        goto allocate_in_reg;
2068                    }
2069                }
2070            }
2071        }
2072        reg = ts->reg;
2073        if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2074            /* nothing to do: the constraint is satisfied */
2075        } else {
2076        allocate_in_reg:
2077            /* allocate a new register matching the constraint 
2078               and move the temporary register into it */
2079            reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs,
2080                                ts->indirect_base);
2081            tcg_out_mov(s, ts->type, reg, ts->reg);
2082        }
2083        new_args[i] = reg;
2084        const_args[i] = 0;
2085        tcg_regset_set_reg(allocated_regs, reg);
2086    iarg_end: ;
2087    }
2088    
2089    /* mark dead temporaries and free the associated registers */
2090    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2091        if (IS_DEAD_ARG(i)) {
2092            temp_dead(s, &s->temps[args[i]]);
2093        }
2094    }
2095
2096    if (def->flags & TCG_OPF_BB_END) {
2097        tcg_reg_alloc_bb_end(s, allocated_regs);
2098    } else {
2099        if (def->flags & TCG_OPF_CALL_CLOBBER) {
2100            /* XXX: permit generic clobber register list? */
2101            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
2102                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
2103                    tcg_reg_free(s, i, allocated_regs);
2104                }
2105            }
2106        }
2107        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2108            /* sync globals if the op has side effects and might trigger
2109               an exception. */
2110            sync_globals(s, allocated_regs);
2111        }
2112        
2113        /* satisfy the output constraints */
2114        tcg_regset_set(allocated_regs, s->reserved_regs);
2115        for(k = 0; k < nb_oargs; k++) {
2116            i = def->sorted_args[k];
2117            arg = args[i];
2118            arg_ct = &def->args_ct[i];
2119            ts = &s->temps[arg];
2120            if (arg_ct->ct & TCG_CT_ALIAS) {
2121                reg = new_args[arg_ct->alias_index];
2122            } else {
2123                /* if fixed register, we try to use it */
2124                reg = ts->reg;
2125                if (ts->fixed_reg &&
2126                    tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2127                    goto oarg_end;
2128                }
2129                reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs,
2130                                    ts->indirect_base);
2131            }
2132            tcg_regset_set_reg(allocated_regs, reg);
2133            /* if a fixed register is used, then a move will be done afterwards */
2134            if (!ts->fixed_reg) {
2135                if (ts->val_type == TEMP_VAL_REG) {
2136                    s->reg_to_temp[ts->reg] = NULL;
2137                }
2138                ts->val_type = TEMP_VAL_REG;
2139                ts->reg = reg;
2140                /* temp value is modified, so the value kept in memory is
2141                   potentially not the same */
2142                ts->mem_coherent = 0;
2143                s->reg_to_temp[reg] = ts;
2144            }
2145        oarg_end:
2146            new_args[i] = reg;
2147        }
2148    }
2149
2150    /* emit instruction */
2151    tcg_out_op(s, opc, new_args, const_args);
2152    
2153    /* move the outputs in the correct register if needed */
2154    for(i = 0; i < nb_oargs; i++) {
2155        ts = &s->temps[args[i]];
2156        reg = new_args[i];
2157        if (ts->fixed_reg && ts->reg != reg) {
2158            tcg_out_mov(s, ts->type, ts->reg, reg);
2159        }
2160        if (NEED_SYNC_ARG(i)) {
2161            tcg_reg_sync(s, reg, allocated_regs);
2162        }
2163        if (IS_DEAD_ARG(i)) {
2164            temp_dead(s, ts);
2165        }
2166    }
2167}
2168
2169#ifdef TCG_TARGET_STACK_GROWSUP
2170#define STACK_DIR(x) (-(x))
2171#else
2172#define STACK_DIR(x) (x)
2173#endif
2174
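/* Register allocation for a call.  The argument list holds the nb_oargs
   outputs first, then the nb_iargs inputs, followed by the function
   pointer and the call flags.  Inputs beyond the target's argument
   registers are written to the preallocated call-argument area on the
   stack.  */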
2175static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
2176                               const TCGArg * const args, uint16_t dead_args,
2177                               uint8_t sync_args)
2178{
2179    int flags, nb_regs, i;
2180    TCGReg reg;
2181    TCGArg arg;
2182    TCGTemp *ts;
2183    intptr_t stack_offset;
2184    size_t call_stack_size;
2185    tcg_insn_unit *func_addr;
2186    int allocate_args;
2187    TCGRegSet allocated_regs;
2188
2189    func_addr = (tcg_insn_unit *)(intptr_t)args[nb_oargs + nb_iargs];
2190    flags = args[nb_oargs + nb_iargs + 1];
2191
2192    nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
2193    if (nb_regs > nb_iargs) {
2194        nb_regs = nb_iargs;
2195    }
2196
2197    /* assign stack slots first */
2198    call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
2199    call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) & 
2200        ~(TCG_TARGET_STACK_ALIGN - 1);
2201    allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
2202    if (allocate_args) {
2203        /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
2204           preallocate call stack */
2205        tcg_abort();
2206    }
2207
2208    stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
2209    for(i = nb_regs; i < nb_iargs; i++) {
2210        arg = args[nb_oargs + i];
2211#ifdef TCG_TARGET_STACK_GROWSUP
2212        stack_offset -= sizeof(tcg_target_long);
2213#endif
2214        if (arg != TCG_CALL_DUMMY_ARG) {
2215            ts = &s->temps[arg];
2216            temp_load(s, ts, tcg_target_available_regs[ts->type],
2217                      s->reserved_regs);
2218            tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
2219        }
2220#ifndef TCG_TARGET_STACK_GROWSUP
2221        stack_offset += sizeof(tcg_target_long);
2222#endif
2223    }
2224    
2225    /* assign input registers */
2226    tcg_regset_set(allocated_regs, s->reserved_regs);
2227    for(i = 0; i < nb_regs; i++) {
2228        arg = args[nb_oargs + i];
2229        if (arg != TCG_CALL_DUMMY_ARG) {
2230            ts = &s->temps[arg];
2231            reg = tcg_target_call_iarg_regs[i];
2232            tcg_reg_free(s, reg, allocated_regs);
2233
2234            if (ts->val_type == TEMP_VAL_REG) {
2235                if (ts->reg != reg) {
2236                    tcg_out_mov(s, ts->type, reg, ts->reg);
2237                }
2238            } else {
2239                TCGRegSet arg_set;
2240
2241                tcg_regset_clear(arg_set);
2242                tcg_regset_set_reg(arg_set, reg);
2243                temp_load(s, ts, arg_set, allocated_regs);
2244            }
2245
2246            tcg_regset_set_reg(allocated_regs, reg);
2247        }
2248    }
2249    
2250    /* mark dead temporaries and free the associated registers */
2251    for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2252        if (IS_DEAD_ARG(i)) {
2253            temp_dead(s, &s->temps[args[i]]);
2254        }
2255    }
2256    
2257    /* clobber call registers */
2258    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
2259        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
2260            tcg_reg_free(s, i, allocated_regs);
2261        }
2262    }
2263
2264    /* Save globals if they might be written by the helper, sync them if
2265       they might be read. */
2266    if (flags & TCG_CALL_NO_READ_GLOBALS) {
2267        /* Nothing to do */
2268    } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
2269        sync_globals(s, allocated_regs);
2270    } else {
2271        save_globals(s, allocated_regs);
2272    }
2273
2274    tcg_out_call(s, func_addr);
2275
2276    /* assign output registers and emit moves if needed */
2277    for(i = 0; i < nb_oargs; i++) {
2278        arg = args[i];
2279        ts = &s->temps[arg];
2280        reg = tcg_target_call_oarg_regs[i];
2281        tcg_debug_assert(s->reg_to_temp[reg] == NULL);
2282
2283        if (ts->fixed_reg) {
2284            if (ts->reg != reg) {
2285                tcg_out_mov(s, ts->type, ts->reg, reg);
2286            }
2287        } else {
2288            if (ts->val_type == TEMP_VAL_REG) {
2289                s->reg_to_temp[ts->reg] = NULL;
2290            }
2291            ts->val_type = TEMP_VAL_REG;
2292            ts->reg = reg;
2293            ts->mem_coherent = 0;
2294            s->reg_to_temp[reg] = ts;
2295            if (NEED_SYNC_ARG(i)) {
2296                tcg_reg_sync(s, reg, allocated_regs);
2297            }
2298            if (IS_DEAD_ARG(i)) {
2299                temp_dead(s, ts);
2300            }
2301        }
2302    }
2303}
2304
2305#ifdef CONFIG_PROFILER
2306
2307static int64_t tcg_table_op_count[NB_OPS];
2308
2309void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
2310{
2311    int i;
2312
2313    for (i = 0; i < NB_OPS; i++) {
2314        cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
2315                    tcg_table_op_count[i]);
2316    }
2317}
2318#else
2319void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
2320{
2321    cpu_fprintf(f, "[TCG profiler not compiled]\n");
2322}
2323#endif
2324
2325
2326int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
2327{
2328    int i, oi, oi_next, num_insns;
2329
2330#ifdef CONFIG_PROFILER
2331    {
2332        int n;
2333
2334        n = s->gen_last_op_idx + 1;
2335        s->op_count += n;
2336        if (n > s->op_count_max) {
2337            s->op_count_max = n;
2338        }
2339
2340        n = s->nb_temps;
2341        s->temp_count += n;
2342        if (n > s->temp_count_max) {
2343            s->temp_count_max = n;
2344        }
2345    }
2346#endif
2347
2348#ifdef DEBUG_DISAS
2349    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
2350                 && qemu_log_in_addr_range(tb->pc))) {
2351        qemu_log("OP:\n");
2352        tcg_dump_ops(s);
2353        qemu_log("\n");
2354    }
2355#endif
2356
2357#ifdef CONFIG_PROFILER
2358    s->opt_time -= profile_getclock();
2359#endif
2360
2361#ifdef USE_TCG_OPTIMIZATIONS
2362    tcg_optimize(s);
2363#endif
2364
2365#ifdef CONFIG_PROFILER
2366    s->opt_time += profile_getclock();
2367    s->la_time -= profile_getclock();
2368#endif
2369
2370    tcg_liveness_analysis(s);
2371
2372#ifdef CONFIG_PROFILER
2373    s->la_time += profile_getclock();
2374#endif
2375
2376#ifdef DEBUG_DISAS
2377    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
2378                 && qemu_log_in_addr_range(tb->pc))) {
2379        qemu_log("OP after optimization and liveness analysis:\n");
2380        tcg_dump_ops(s);
2381        qemu_log("\n");
2382    }
2383#endif
2384
2385    tcg_reg_alloc_start(s);
2386
2387    s->code_buf = tb->tc_ptr;
2388    s->code_ptr = tb->tc_ptr;
2389
2390    tcg_out_tb_init(s);
2391
2392    num_insns = -1;
2393    for (oi = s->gen_first_op_idx; oi >= 0; oi = oi_next) {
2394        TCGOp * const op = &s->gen_op_buf[oi];
2395        TCGArg * const args = &s->gen_opparam_buf[op->args];
2396        TCGOpcode opc = op->opc;
2397        const TCGOpDef *def = &tcg_op_defs[opc];
2398        uint16_t dead_args = s->op_dead_args[oi];
2399        uint8_t sync_args = s->op_sync_args[oi];
2400
2401        oi_next = op->next;
2402#ifdef CONFIG_PROFILER
2403        tcg_table_op_count[opc]++;
2404#endif
2405
2406        switch (opc) {
2407        case INDEX_op_mov_i32:
2408        case INDEX_op_mov_i64:
2409            tcg_reg_alloc_mov(s, def, args, dead_args, sync_args);
2410            break;
2411        case INDEX_op_movi_i32:
2412        case INDEX_op_movi_i64:
2413            tcg_reg_alloc_movi(s, args, dead_args, sync_args);
2414            break;
2415        case INDEX_op_insn_start:
2416            if (num_insns >= 0) {
2417                s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
2418            }
2419            num_insns++;
2420            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
2421                target_ulong a;
2422#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
2423                a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
2424#else
2425                a = args[i];
2426#endif
2427                s->gen_insn_data[num_insns][i] = a;
2428            }
2429            break;
2430        case INDEX_op_discard:
2431            temp_dead(s, &s->temps[args[0]]);
2432            break;
2433        case INDEX_op_set_label:
2434            tcg_reg_alloc_bb_end(s, s->reserved_regs);
2435            tcg_out_label(s, arg_label(args[0]), s->code_ptr);
2436            break;
2437        case INDEX_op_call:
2438            tcg_reg_alloc_call(s, op->callo, op->calli, args,
2439                               dead_args, sync_args);
2440            break;
2441        default:
2442            /* Sanity check that we've not introduced any unhandled opcodes. */
2443            if (def->flags & TCG_OPF_NOT_PRESENT) {
2444                tcg_abort();
2445            }
2446            /* Note: it would be considerably faster to have specialized
2447               register allocator functions for some common argument
2448               patterns. */
2449            tcg_reg_alloc_op(s, def, opc, args, dead_args, sync_args);
2450            break;
2451        }
2452#ifdef CONFIG_DEBUG_TCG
2453        check_regs(s);
2454#endif
2455        /* Test for (pending) buffer overflow.  The assumption is that any
2456           one operation beginning below the high water mark cannot overrun
2457           the buffer completely.  Thus we can test for overflow after
2458           generating code without having to check during generation.  */
2459        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
2460            return -1;
2461        }
2462    }
2463    tcg_debug_assert(num_insns >= 0);
2464    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
2465
2466    /* Generate TB finalization at the end of block */
2467    if (!tcg_out_tb_finalize(s)) {
2468        return -1;
2469    }
2470
2471    /* flush instruction cache */
2472    flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
2473
2474    return tcg_current_code_size(s);
2475}
2476
2477#ifdef CONFIG_PROFILER
2478void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2479{
2480    TCGContext *s = &tcg_ctx;
2481    int64_t tb_count = s->tb_count;
2482    int64_t tb_div_count = tb_count ? tb_count : 1;
2483    int64_t tot = s->interm_time + s->code_time;
2484
2485    cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2486                tot, tot / 2.4e9);
2487    cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n", 
2488                tb_count, s->tb_count1 - tb_count,
2489                (double)(s->tb_count1 - s->tb_count)
2490                / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
2491    cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n", 
2492                (double)s->op_count / tb_div_count, s->op_count_max);
2493    cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
2494                (double)s->del_op_count / tb_div_count);
2495    cpu_fprintf(f, "avg temps/TB        %0.2f max=%d\n",
2496                (double)s->temp_count / tb_div_count, s->temp_count_max);
2497    cpu_fprintf(f, "avg host code/TB    %0.1f\n",
2498                (double)s->code_out_len / tb_div_count);
2499    cpu_fprintf(f, "avg search data/TB  %0.1f\n",
2500                (double)s->search_out_len / tb_div_count);
2501    
2502    cpu_fprintf(f, "cycles/op           %0.1f\n", 
2503                s->op_count ? (double)tot / s->op_count : 0);
2504    cpu_fprintf(f, "cycles/in byte      %0.1f\n", 
2505                s->code_in_len ? (double)tot / s->code_in_len : 0);
2506    cpu_fprintf(f, "cycles/out byte     %0.1f\n", 
2507                s->code_out_len ? (double)tot / s->code_out_len : 0);
2508    cpu_fprintf(f, "cycles/search byte  %0.1f\n",
2509                s->search_out_len ? (double)tot / s->search_out_len : 0);
2510    if (tot == 0) {
2511        tot = 1;
2512    }
2513    cpu_fprintf(f, "  gen_interm time   %0.1f%%\n", 
2514                (double)s->interm_time / tot * 100.0);
2515    cpu_fprintf(f, "  gen_code time     %0.1f%%\n", 
2516                (double)s->code_time / tot * 100.0);
2517    cpu_fprintf(f, "optim./code time    %0.1f%%\n",
2518                (double)s->opt_time / (s->code_time ? s->code_time : 1)
2519                * 100.0);
2520    cpu_fprintf(f, "liveness/code time  %0.1f%%\n", 
2521                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
2522    cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
2523                s->restore_count);
2524    cpu_fprintf(f, "  avg cycles        %0.1f\n",
2525                s->restore_count ? (double)s->restore_time / s->restore_count : 0);
2526}
2527#else
2528void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2529{
2530    cpu_fprintf(f, "[TCG profiler not compiled]\n");
2531}
2532#endif
2533
2534#ifdef ELF_HOST_MACHINE
2535/* In order to use this feature, the backend needs to do three things:
2536
2537   (1) Define ELF_HOST_MACHINE to indicate both what value to
2538       put into the ELF image and to indicate support for the feature.
2539
2540   (2) Define tcg_register_jit.  This should create a buffer containing
2541       the contents of a .debug_frame section that describes the post-
2542       prologue unwind info for the tcg machine.
2543
2544   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
2545*/
2546
2547/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
2548typedef enum {
2549    JIT_NOACTION = 0,
2550    JIT_REGISTER_FN,
2551    JIT_UNREGISTER_FN
2552} jit_actions_t;
2553
2554struct jit_code_entry {
2555    struct jit_code_entry *next_entry;
2556    struct jit_code_entry *prev_entry;
2557    const void *symfile_addr;
2558    uint64_t symfile_size;
2559};
2560
2561struct jit_descriptor {
2562    uint32_t version;
2563    uint32_t action_flag;
2564    struct jit_code_entry *relevant_entry;
2565    struct jit_code_entry *first_entry;
2566};
2567
2568void __jit_debug_register_code(void) __attribute__((noinline));
2569void __jit_debug_register_code(void)
2570{
2571    asm("");
2572}
2573
2574/* Must statically initialize the version, because GDB may check
2575   the version before we can set it.  */
2576struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
2577
2578/* End GDB interface.  */
2579
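/* Return the offset of 'str' within the string table 'strtab'.  The
   search skips the table's leading NUL byte and assumes the string is
   present; e.g. looking up ".text" in the table built below yields 1.  */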
2580static int find_string(const char *strtab, const char *str)
2581{
2582    const char *p = strtab + 1;
2583
2584    while (1) {
2585        if (strcmp(p, str) == 0) {
2586            return p - strtab;
2587        }
2588        p += strlen(p) + 1;
2589    }
2590}
2591
2592static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
2593                                 const void *debug_frame,
2594                                 size_t debug_frame_size)
2595{
2596    struct __attribute__((packed)) DebugInfo {
2597        uint32_t  len;
2598        uint16_t  version;
2599        uint32_t  abbrev;
2600        uint8_t   ptr_size;
2601        uint8_t   cu_die;
2602        uint16_t  cu_lang;
2603        uintptr_t cu_low_pc;
2604        uintptr_t cu_high_pc;
2605        uint8_t   fn_die;
2606        char      fn_name[16];
2607        uintptr_t fn_low_pc;
2608        uintptr_t fn_high_pc;
2609        uint8_t   cu_eoc;
2610    };
2611
2612    struct ElfImage {
2613        ElfW(Ehdr) ehdr;
2614        ElfW(Phdr) phdr;
2615        ElfW(Shdr) shdr[7];
2616        ElfW(Sym)  sym[2];
2617        struct DebugInfo di;
2618        uint8_t    da[24];
2619        char       str[80];
2620    };
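    /* The fake image is laid out exactly as the struct above: ELF and
       program headers, seven section headers (null, .text, .debug_info,
       .debug_abbrev, .debug_frame, .symtab, .strtab), two symbols, the
       DWARF compile unit and function DIEs, the abbreviation table and
       the string table.  The .debug_frame data supplied by the backend
       is appended immediately after the struct.  */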
2621
2622    struct ElfImage *img;
2623
2624    static const struct ElfImage img_template = {
2625        .ehdr = {
2626            .e_ident[EI_MAG0] = ELFMAG0,
2627            .e_ident[EI_MAG1] = ELFMAG1,
2628            .e_ident[EI_MAG2] = ELFMAG2,
2629            .e_ident[EI_MAG3] = ELFMAG3,
2630            .e_ident[EI_CLASS] = ELF_CLASS,
2631            .e_ident[EI_DATA] = ELF_DATA,
2632            .e_ident[EI_VERSION] = EV_CURRENT,
2633            .e_type = ET_EXEC,
2634            .e_machine = ELF_HOST_MACHINE,
2635            .e_version = EV_CURRENT,
2636            .e_phoff = offsetof(struct ElfImage, phdr),
2637            .e_shoff = offsetof(struct ElfImage, shdr),
2638            .e_ehsize = sizeof(ElfW(Ehdr)),
2639            .e_phentsize = sizeof(ElfW(Phdr)),
2640            .e_phnum = 1,
2641            .e_shentsize = sizeof(ElfW(Shdr)),
2642            .e_shnum = ARRAY_SIZE(img->shdr),
2643            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
2644#ifdef ELF_HOST_FLAGS
2645            .e_flags = ELF_HOST_FLAGS,
2646#endif
2647#ifdef ELF_OSABI
2648            .e_ident[EI_OSABI] = ELF_OSABI,
2649#endif
2650        },
2651        .phdr = {
2652            .p_type = PT_LOAD,
2653            .p_flags = PF_X,
2654        },
2655        .shdr = {
2656            [0] = { .sh_type = SHT_NULL },
2657            /* Trick: The contents of code_gen_buffer are not present in
2658               this fake ELF file; that got allocated elsewhere.  Therefore
2659               we mark .text as SHT_NOBITS (similar to .bss) so that readers
2660               will not look for contents.  We can record any address.  */
2661            [1] = { /* .text */
2662                .sh_type = SHT_NOBITS,
2663                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
2664            },
2665            [2] = { /* .debug_info */
2666                .sh_type = SHT_PROGBITS,
2667                .sh_offset = offsetof(struct ElfImage, di),
2668                .sh_size = sizeof(struct DebugInfo),
2669            },
2670            [3] = { /* .debug_abbrev */
2671                .sh_type = SHT_PROGBITS,
2672                .sh_offset = offsetof(struct ElfImage, da),
2673                .sh_size = sizeof(img->da),
2674            },
2675            [4] = { /* .debug_frame */
2676                .sh_type = SHT_PROGBITS,
2677                .sh_offset = sizeof(struct ElfImage),
2678            },
2679            [5] = { /* .symtab */
2680                .sh_type = SHT_SYMTAB,
2681                .sh_offset = offsetof(struct ElfImage, sym),
2682                .sh_size = sizeof(img->sym),
2683                .sh_info = 1,
2684                .sh_link = ARRAY_SIZE(img->shdr) - 1,
2685                .sh_entsize = sizeof(ElfW(Sym)),
2686            },
2687            [6] = { /* .strtab */
2688                .sh_type = SHT_STRTAB,
2689                .sh_offset = offsetof(struct ElfImage, str),
2690                .sh_size = sizeof(img->str),
2691            }
2692        },
2693        .sym = {
2694            [1] = { /* code_gen_buffer */
2695                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
2696                .st_shndx = 1,
2697            }
2698        },
2699        .di = {
2700            .len = sizeof(struct DebugInfo) - 4,
2701            .version = 2,
2702            .ptr_size = sizeof(void *),
2703            .cu_die = 1,
2704            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
2705            .fn_die = 2,
2706            .fn_name = "code_gen_buffer"
2707        },
2708        .da = {
2709            1,          /* abbrev number (the cu) */
2710            0x11, 1,    /* DW_TAG_compile_unit, has children */
2711            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
2712            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
2713            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
2714            0, 0,       /* end of abbrev */
2715            2,          /* abbrev number (the fn) */
2716            0x2e, 0,    /* DW_TAG_subprogram, no children */
2717            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
2718            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
2719            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
2720            0, 0,       /* end of abbrev */
2721            0           /* no more abbrev */
2722        },
2723        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
2724               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
2725    };
2726
2727    /* We only need a single jit entry; statically allocate it.  */
2728    static struct jit_code_entry one_entry;
2729
2730    uintptr_t buf = (uintptr_t)buf_ptr;
2731    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
2732    DebugFrameHeader *dfh;
2733
2734    img = g_malloc(img_size);
2735    *img = img_template;
2736
2737    img->phdr.p_vaddr = buf;
2738    img->phdr.p_paddr = buf;
2739    img->phdr.p_memsz = buf_size;
2740
2741    img->shdr[1].sh_name = find_string(img->str, ".text");
2742    img->shdr[1].sh_addr = buf;
2743    img->shdr[1].sh_size = buf_size;
2744
2745    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
2746    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
2747
2748    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
2749    img->shdr[4].sh_size = debug_frame_size;
2750
2751    img->shdr[5].sh_name = find_string(img->str, ".symtab");
2752    img->shdr[6].sh_name = find_string(img->str, ".strtab");
2753
2754    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
2755    img->sym[1].st_value = buf;
2756    img->sym[1].st_size = buf_size;
2757
2758    img->di.cu_low_pc = buf;
2759    img->di.cu_high_pc = buf + buf_size;
2760    img->di.fn_low_pc = buf;
2761    img->di.fn_high_pc = buf + buf_size;
2762
2763    dfh = (DebugFrameHeader *)(img + 1);
2764    memcpy(dfh, debug_frame, debug_frame_size);
2765    dfh->fde.func_start = buf;
2766    dfh->fde.func_len = buf_size;
2767
2768#ifdef DEBUG_JIT
2769    /* Enable this block to debug the ELF image file creation.
2770       One can use readelf, objdump, or other inspection utilities.  */
2771    {
2772        FILE *f = fopen("/tmp/qemu.jit", "w+b");
2773        if (f) {
2774            if (fwrite(img, img_size, 1, f) != 1) {
2775                /* Avoid stupid unused return value warning for fwrite.  */
2776            }
2777            fclose(f);
2778        }
2779    }
2780#endif
2781
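    /* Publish the image through the GDB JIT interface: point the
       descriptor at the single, statically allocated entry, flag a
       registration and call the hook function that the debugger traps.  */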
2782    one_entry.symfile_addr = img;
2783    one_entry.symfile_size = img_size;
2784
2785    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
2786    __jit_debug_descriptor.relevant_entry = &one_entry;
2787    __jit_debug_descriptor.first_entry = &one_entry;
2788    __jit_debug_register_code();
2789}
2790#else
2791/* No support for the feature.  Provide the entry point expected by exec.c,
2792   and implement the internal function we declared earlier.  */
2793
2794static void tcg_register_jit_int(void *buf, size_t size,
2795                                 const void *debug_frame,
2796                                 size_t debug_frame_size)
2797{
2798}
2799
2800void tcg_register_jit(void *buf, size_t buf_size)
2801{
2802}
2803#endif /* ELF_HOST_MACHINE */
2804