qemu/accel/tcg/translate-all.c
   1/*
   2 *  Host code generation
   3 *
   4 *  Copyright (c) 2003 Fabrice Bellard
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2.1 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19
  20#include "qemu/osdep.h"
  21#include "qemu-common.h"
  22
  23#define NO_CPU_IO_DEFS
  24#include "trace.h"
  25#include "disas/disas.h"
  26#include "exec/exec-all.h"
  27#include "tcg/tcg.h"
  28#if defined(CONFIG_USER_ONLY)
  29#include "qemu.h"
  30#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
  31#include <sys/param.h>
  32#if __FreeBSD_version >= 700104
  33#define HAVE_KINFO_GETVMMAP
  34#define sigqueue sigqueue_freebsd  /* avoid redefinition */
  35#include <sys/proc.h>
  36#include <machine/profile.h>
  37#define _KERNEL
  38#include <sys/user.h>
  39#undef _KERNEL
  40#undef sigqueue
  41#include <libutil.h>
  42#endif
  43#endif
  44#else
  45#include "exec/ram_addr.h"
  46#endif
  47
  48#include "exec/cputlb.h"
  49#include "exec/translate-all.h"
  50#include "qemu/bitmap.h"
  51#include "qemu/qemu-print.h"
  52#include "qemu/timer.h"
  53#include "qemu/main-loop.h"
  54#include "exec/log.h"
  55#include "qemu/etrace.h"
  56#include "sysemu/cpus.h"
  57#include "sysemu/cpu-timers.h"
  58#include "sysemu/tcg.h"
  59#include "qapi/error.h"
  60#include "hw/core/tcg-cpu-ops.h"
  61#include "tb-hash.h"
  62#include "tb-context.h"
  63#include "internal.h"
  64
  65/* #define DEBUG_TB_INVALIDATE */
  66/* #define DEBUG_TB_FLUSH */
  67/* make various TB consistency checks */
  68/* #define DEBUG_TB_CHECK */
  69
  70#ifdef DEBUG_TB_INVALIDATE
  71#define DEBUG_TB_INVALIDATE_GATE 1
  72#else
  73#define DEBUG_TB_INVALIDATE_GATE 0
  74#endif
  75
  76#ifdef DEBUG_TB_FLUSH
  77#define DEBUG_TB_FLUSH_GATE 1
  78#else
  79#define DEBUG_TB_FLUSH_GATE 0
  80#endif
  81
  82#if !defined(CONFIG_USER_ONLY)
  83/* TB consistency checks only implemented for usermode emulation.  */
  84#undef DEBUG_TB_CHECK
  85#endif
  86
  87#ifdef DEBUG_TB_CHECK
  88#define DEBUG_TB_CHECK_GATE 1
  89#else
  90#define DEBUG_TB_CHECK_GATE 0
  91#endif
  92
   93/* Access to the various translation structures needs to be serialised via
   94 * locks for consistency.
   95 * In user-mode emulation, access to the memory-related structures is
   96 * protected with mmap_lock.
  97 * In !user-mode we use per-page locks.
  98 */
  99#ifdef CONFIG_SOFTMMU
 100#define assert_memory_lock()
 101#else
 102#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
 103#endif
 104
 105#define SMC_BITMAP_USE_THRESHOLD 10
 106
 107typedef struct PageDesc {
 108    /* list of TBs intersecting this ram page */
 109    uintptr_t first_tb;
 110#ifdef CONFIG_SOFTMMU
  111    /* In order to optimize self-modifying code, we count the number of
  112       writes to a given page; past a threshold we switch to a bitmap. */
 113    unsigned long *code_bitmap;
 114    unsigned int code_write_count;
 115#else
 116    unsigned long flags;
 117    void *target_data;
 118#endif
 119#ifndef CONFIG_USER_ONLY
 120    QemuSpin lock;
 121#endif
 122} PageDesc;
 123
 124/**
 125 * struct page_entry - page descriptor entry
 126 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 127 * @index:  page index of the page
 128 * @locked: whether the page is locked
 129 *
 130 * This struct helps us keep track of the locked state of a page, without
 131 * bloating &struct PageDesc.
 132 *
 133 * A page lock protects accesses to all fields of &struct PageDesc.
 134 *
 135 * See also: &struct page_collection.
 136 */
 137struct page_entry {
 138    PageDesc *pd;
 139    tb_page_addr_t index;
 140    bool locked;
 141};
 142
 143/**
 144 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 145 * @tree:   Binary search tree (BST) of the pages, with key == page index
 146 * @max:    Pointer to the page in @tree with the highest page index
 147 *
 148 * To avoid deadlock we lock pages in ascending order of page index.
 149 * When operating on a set of pages, we need to keep track of them so that
 150 * we can lock them in order and also unlock them later. For this we collect
 151 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 152 * @tree implementation we use does not provide an O(1) operation to obtain the
 153 * highest-ranked element, we use @max to keep track of the inserted page
 154 * with the highest index. This is valuable because if a page is not in
 155 * the tree and its index is higher than @max's, then we can lock it
 156 * without breaking the locking order rule.
 157 *
 158 * Note on naming: 'struct page_set' would be shorter, but we already have a few
 159 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
 160 *
 161 * See also: page_collection_lock().
 162 */
 163struct page_collection {
 164    GTree *tree;
 165    struct page_entry *max;
 166};
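
/*
 * Editor's illustrative sketch (not part of the original file): the typical
 * usage pattern for this structure, as tb_invalidate_phys_page_range() does
 * further down, is
 *
 *     struct page_collection *pages = page_collection_lock(start, end);
 *     ...all PageDescs of the range and of intersecting TBs are now locked...
 *     page_collection_unlock(pages);
 *
 * so that the caller can walk and invalidate TBs without racing other vCPUs.
 */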
 167
 168/* list iterators for lists of tagged pointers in TranslationBlock */
 169#define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
 170    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
 171         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
 172             tb = (TranslationBlock *)((uintptr_t)tb & ~1))
 173
 174#define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
 175    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
 176
 177#define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
 178    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
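
/*
 * Editor's illustrative sketch (not part of the original file): the lists
 * iterated above store "tagged" pointers.  Bit 0 of the stored word selects
 * which of the TB's two link slots (page_next[] or jmp_list_next[]) carries
 * the chain onward, and the remaining bits are the TranslationBlock pointer.
 */
#if 0
static inline TranslationBlock *example_untag_tb(uintptr_t tagged, int *n)
{
    *n = tagged & 1;                                     /* link slot: 0 or 1 */
    return (TranslationBlock *)(tagged & ~(uintptr_t)1); /* the TB itself */
}
#endif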
 179
 180/*
 181 * In system mode we want L1_MAP to be based on ram offsets,
 182 * while in user mode we want it to be based on virtual addresses.
 183 *
 184 * TODO: For user mode, see the caveat re host vs guest virtual
 185 * address spaces near GUEST_ADDR_MAX.
 186 */
 187#if !defined(CONFIG_USER_ONLY)
 188#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
 189# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
 190#else
 191# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
 192#endif
 193#else
 194# define L1_MAP_ADDR_SPACE_BITS  MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
 195#endif
 196
 197/* Size of the L2 (and L3, etc) page tables.  */
 198#define V_L2_BITS 10
 199#define V_L2_SIZE (1 << V_L2_BITS)
 200
 201/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
 202QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
 203                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
 204                  * BITS_PER_BYTE);
 205
 206/*
 207 * L1 Mapping properties
 208 */
 209static int v_l1_size;
 210static int v_l1_shift;
 211static int v_l2_levels;
 212
 213/* The bottom level has pointers to PageDesc, and is indexed by
 214 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 215 */
 216#define V_L1_MIN_BITS 4
 217#define V_L1_MAX_BITS (V_L2_BITS + 3)
 218#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
 219
 220static void *l1_map[V_L1_MAX_SIZE];
 221
 222TBContext tb_ctx;
 223
 224static void page_table_config_init(void)
 225{
 226    uint32_t v_l1_bits;
 227
 228    assert(TARGET_PAGE_BITS);
 229    /* The bits remaining after N lower levels of page tables.  */
 230    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
 231    if (v_l1_bits < V_L1_MIN_BITS) {
 232        v_l1_bits += V_L2_BITS;
 233    }
 234
 235    v_l1_size = 1 << v_l1_bits;
 236    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
 237    v_l2_levels = v_l1_shift / V_L2_BITS - 1;
 238
 239    assert(v_l1_bits <= V_L1_MAX_BITS);
 240    assert(v_l1_shift % V_L2_BITS == 0);
 241    assert(v_l2_levels >= 0);
 242}
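
/*
 * Worked example (editor's illustration): assuming a 32-bit user-mode guest
 * with 4 KiB target pages, i.e. L1_MAP_ADDR_SPACE_BITS == 32 and
 * TARGET_PAGE_BITS == 12, the code above computes
 *
 *   v_l1_bits   = (32 - 12) % 10 = 0   -> below V_L1_MIN_BITS, so += 10
 *   v_l1_size   = 1 << 10 = 1024 entries in l1_map
 *   v_l1_shift  = 32 - 12 - 10 = 10
 *   v_l2_levels = 10 / 10 - 1 = 0      -> no intermediate levels at all
 */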
 243
 244/* Encode VAL as a signed leb128 sequence at P.
 245   Return P incremented past the encoded value.  */
 246static uint8_t *encode_sleb128(uint8_t *p, target_long val)
 247{
 248    int more, byte;
 249
 250    do {
 251        byte = val & 0x7f;
 252        val >>= 7;
 253        more = !((val == 0 && (byte & 0x40) == 0)
 254                 || (val == -1 && (byte & 0x40) != 0));
 255        if (more) {
 256            byte |= 0x80;
 257        }
 258        *p++ = byte;
 259    } while (more);
 260
 261    return p;
 262}
 263
 264/* Decode a signed leb128 sequence at *PP; increment *PP past the
 265   decoded value.  Return the decoded value.  */
 266static target_long decode_sleb128(const uint8_t **pp)
 267{
 268    const uint8_t *p = *pp;
 269    target_long val = 0;
 270    int byte, shift = 0;
 271
 272    do {
 273        byte = *p++;
 274        val |= (target_ulong)(byte & 0x7f) << shift;
 275        shift += 7;
 276    } while (byte & 0x80);
 277    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
 278        val |= -(target_ulong)1 << shift;
 279    }
 280
 281    *pp = p;
 282    return val;
 283}
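
/*
 * Editor's illustrative sketch (not compiled): round-trip a value through
 * the two helpers above.  For instance, -42 encodes to the single byte 0x56
 * (the low seven bits of -42 with the continuation bit clear; bit 0x40
 * carries the sign), and decodes back to -42.
 */
#if 0
static void example_sleb128_roundtrip(void)
{
    uint8_t buf[16];
    const uint8_t *r = buf;

    encode_sleb128(buf, -42);            /* emits one byte: 0x56 */
    assert(decode_sleb128(&r) == -42);   /* reads it back */
}
#endif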
 284
 285/* Encode the data collected about the instructions while compiling TB.
 286   Place the data at BLOCK, and return the number of bytes consumed.
 287
 288   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
 289   which come from the target's insn_start data, followed by a uintptr_t
 290   which comes from the host pc of the end of the code implementing the insn.
 291
 292   Each line of the table is encoded as sleb128 deltas from the previous
 293   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
 294   That is, the first column is seeded with the guest pc, the last column
 295   with the host pc, and the middle columns with zeros.  */
 296
 297static int encode_search(TranslationBlock *tb, uint8_t *block)
 298{
 299    uint8_t *highwater = tcg_ctx->code_gen_highwater;
 300    uint8_t *p = block;
 301    int i, j, n;
 302
 303    for (i = 0, n = tb->icount; i < n; ++i) {
 304        target_ulong prev;
 305
 306        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
 307            if (i == 0) {
 308                prev = (j == 0 ? tb->pc : 0);
 309            } else {
 310                prev = tcg_ctx->gen_insn_data[i - 1][j];
 311            }
 312            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
 313        }
 314        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
 315        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
 316
 317        /* Test for (pending) buffer overflow.  The assumption is that any
 318           one row beginning below the high water mark cannot overrun
 319           the buffer completely.  Thus we can test for overflow after
 320           encoding a row without having to check during encoding.  */
 321        if (unlikely(p > highwater)) {
 322            return -1;
 323        }
 324    }
 325
 326    return p - block;
 327}
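
/*
 * Worked example (editor's illustration, assuming TARGET_INSN_START_WORDS
 * is 1): for a two-insn TB whose guest pcs are 0x1000 and 0x1004, and whose
 * generated code for each insn ends at offsets 0x40 and 0x7c from tb->tc.ptr,
 * encode_search() emits the deltas
 *
 *   row 0: sleb128(0x1000 - 0x1000), sleb128(0x40 - 0x00)
 *   row 1: sleb128(0x1004 - 0x1000), sleb128(0x7c - 0x40)
 *
 * cpu_restore_state_from_tb() below re-accumulates the same deltas until the
 * decoded host pc passes the searched one.
 */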
 328
 329/* The cpu state corresponding to 'searched_pc' is restored.
  330 * When reset_icount is true, the current TB will be interrupted and
  331 * the icount should be recalculated.
 332 */
 333static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
 334                                     uintptr_t searched_pc, bool reset_icount)
 335{
 336    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
 337    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
 338    CPUArchState *env = cpu->env_ptr;
 339    const uint8_t *p = tb->tc.ptr + tb->tc.size;
 340    int i, j, num_insns = tb->icount;
 341#ifdef CONFIG_PROFILER
 342    TCGProfile *prof = &tcg_ctx->prof;
 343    int64_t ti = profile_getclock();
 344#endif
 345
 346    searched_pc -= GETPC_ADJ;
 347
 348    if (searched_pc < host_pc) {
 349        return -1;
 350    }
 351
 352    /* Reconstruct the stored insn data while looking for the point at
 353       which the end of the insn exceeds the searched_pc.  */
 354    for (i = 0; i < num_insns; ++i) {
 355        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
 356            data[j] += decode_sleb128(&p);
 357        }
 358        host_pc += decode_sleb128(&p);
 359        if (host_pc > searched_pc) {
 360            goto found;
 361        }
 362    }
 363    return -1;
 364
 365 found:
 366    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
 367        assert(icount_enabled());
 368        /* Reset the cycle counter to the start of the block
  369           and shift it to the number of actually executed instructions */
 370        cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
 371    }
 372    restore_state_to_opc(env, tb, data);
 373
 374#ifdef CONFIG_PROFILER
 375    qatomic_set(&prof->restore_time,
 376                prof->restore_time + profile_getclock() - ti);
 377    qatomic_set(&prof->restore_count, prof->restore_count + 1);
 378#endif
 379    return 0;
 380}
 381
 382bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
 383{
 384    /*
 385     * The host_pc has to be in the rx region of the code buffer.
 386     * If it is not we will not be able to resolve it here.
 387     * The two cases where host_pc will not be correct are:
 388     *
 389     *  - fault during translation (instruction fetch)
 390     *  - fault from helper (not using GETPC() macro)
 391     *
  392     * Either way we need to return early as we can't resolve it here.
 393     */
 394    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
 395        TranslationBlock *tb = tcg_tb_lookup(host_pc);
 396        if (tb) {
 397            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
 398            return true;
 399        }
 400    }
 401    return false;
 402}
 403
 404void page_init(void)
 405{
 406    page_size_init();
 407    page_table_config_init();
 408
 409#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
 410    {
 411#ifdef HAVE_KINFO_GETVMMAP
 412        struct kinfo_vmentry *freep;
 413        int i, cnt;
 414
 415        freep = kinfo_getvmmap(getpid(), &cnt);
 416        if (freep) {
 417            mmap_lock();
 418            for (i = 0; i < cnt; i++) {
 419                unsigned long startaddr, endaddr;
 420
 421                startaddr = freep[i].kve_start;
 422                endaddr = freep[i].kve_end;
 423                if (h2g_valid(startaddr)) {
 424                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
 425
 426                    if (h2g_valid(endaddr)) {
 427                        endaddr = h2g(endaddr);
 428                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
 429                    } else {
 430#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
 431                        endaddr = ~0ul;
 432                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
 433#endif
 434                    }
 435                }
 436            }
 437            free(freep);
 438            mmap_unlock();
 439        }
 440#else
 441        FILE *f;
 442
 443        last_brk = (unsigned long)sbrk(0);
 444
 445        f = fopen("/compat/linux/proc/self/maps", "r");
 446        if (f) {
 447            mmap_lock();
 448
 449            do {
 450                unsigned long startaddr, endaddr;
 451                int n;
 452
 453                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
 454
 455                if (n == 2 && h2g_valid(startaddr)) {
 456                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
 457
 458                    if (h2g_valid(endaddr)) {
 459                        endaddr = h2g(endaddr);
 460                    } else {
 461                        endaddr = ~0ul;
 462                    }
 463                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
 464                }
 465            } while (!feof(f));
 466
 467            fclose(f);
 468            mmap_unlock();
 469        }
 470#endif
 471    }
 472#endif
 473}
 474
 475static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
 476{
 477    PageDesc *pd;
 478    void **lp;
 479    int i;
 480
 481    /* Level 1.  Always allocated.  */
 482    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
 483
 484    /* Level 2..N-1.  */
 485    for (i = v_l2_levels; i > 0; i--) {
 486        void **p = qatomic_rcu_read(lp);
 487
 488        if (p == NULL) {
 489            void *existing;
 490
 491            if (!alloc) {
 492                return NULL;
 493            }
 494            p = g_new0(void *, V_L2_SIZE);
 495            existing = qatomic_cmpxchg(lp, NULL, p);
 496            if (unlikely(existing)) {
 497                g_free(p);
 498                p = existing;
 499            }
 500        }
 501
 502        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
 503    }
 504
 505    pd = qatomic_rcu_read(lp);
 506    if (pd == NULL) {
 507        void *existing;
 508
 509        if (!alloc) {
 510            return NULL;
 511        }
 512        pd = g_new0(PageDesc, V_L2_SIZE);
 513#ifndef CONFIG_USER_ONLY
 514        {
 515            int i;
 516
 517            for (i = 0; i < V_L2_SIZE; i++) {
 518                qemu_spin_init(&pd[i].lock);
 519            }
 520        }
 521#endif
 522        existing = qatomic_cmpxchg(lp, NULL, pd);
 523        if (unlikely(existing)) {
 524#ifndef CONFIG_USER_ONLY
 525            {
 526                int i;
 527
 528                for (i = 0; i < V_L2_SIZE; i++) {
 529                    qemu_spin_destroy(&pd[i].lock);
 530                }
 531            }
 532#endif
 533            g_free(pd);
 534            pd = existing;
 535        }
 536    }
 537
 538    return pd + (index & (V_L2_SIZE - 1));
 539}
 540
 541static inline PageDesc *page_find(tb_page_addr_t index)
 542{
 543    return page_find_alloc(index, 0);
 544}
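
/*
 * Editor's note: callers index the radix tree by guest page number, e.g.
 *
 *     PageDesc *pd = page_find(addr >> TARGET_PAGE_BITS);
 *
 * and get NULL back when the page has never been allocated a descriptor.
 */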
 545
 546static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
 547                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
 548
 549/* In user-mode page locks aren't used; mmap_lock is enough */
 550#ifdef CONFIG_USER_ONLY
 551
 552#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
 553
 554static inline void page_lock(PageDesc *pd)
 555{ }
 556
 557static inline void page_unlock(PageDesc *pd)
 558{ }
 559
 560static inline void page_lock_tb(const TranslationBlock *tb)
 561{ }
 562
 563static inline void page_unlock_tb(const TranslationBlock *tb)
 564{ }
 565
 566struct page_collection *
 567page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
 568{
 569    return NULL;
 570}
 571
 572void page_collection_unlock(struct page_collection *set)
 573{ }
 574#else /* !CONFIG_USER_ONLY */
 575
 576#ifdef CONFIG_DEBUG_TCG
 577
 578static __thread GHashTable *ht_pages_locked_debug;
 579
 580static void ht_pages_locked_debug_init(void)
 581{
 582    if (ht_pages_locked_debug) {
 583        return;
 584    }
 585    ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
 586}
 587
 588static bool page_is_locked(const PageDesc *pd)
 589{
 590    PageDesc *found;
 591
 592    ht_pages_locked_debug_init();
 593    found = g_hash_table_lookup(ht_pages_locked_debug, pd);
 594    return !!found;
 595}
 596
 597static void page_lock__debug(PageDesc *pd)
 598{
 599    ht_pages_locked_debug_init();
 600    g_assert(!page_is_locked(pd));
 601    g_hash_table_insert(ht_pages_locked_debug, pd, pd);
 602}
 603
 604static void page_unlock__debug(const PageDesc *pd)
 605{
 606    bool removed;
 607
 608    ht_pages_locked_debug_init();
 609    g_assert(page_is_locked(pd));
 610    removed = g_hash_table_remove(ht_pages_locked_debug, pd);
 611    g_assert(removed);
 612}
 613
 614static void
 615do_assert_page_locked(const PageDesc *pd, const char *file, int line)
 616{
 617    if (unlikely(!page_is_locked(pd))) {
 618        error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
 619                     pd, file, line);
 620        abort();
 621    }
 622}
 623
 624#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
 625
 626void assert_no_pages_locked(void)
 627{
 628    ht_pages_locked_debug_init();
 629    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
 630}
 631
 632#else /* !CONFIG_DEBUG_TCG */
 633
 634#define assert_page_locked(pd)
 635
 636static inline void page_lock__debug(const PageDesc *pd)
 637{
 638}
 639
 640static inline void page_unlock__debug(const PageDesc *pd)
 641{
 642}
 643
 644#endif /* CONFIG_DEBUG_TCG */
 645
 646static inline void page_lock(PageDesc *pd)
 647{
 648    page_lock__debug(pd);
 649    qemu_spin_lock(&pd->lock);
 650}
 651
 652static inline void page_unlock(PageDesc *pd)
 653{
 654    qemu_spin_unlock(&pd->lock);
 655    page_unlock__debug(pd);
 656}
 657
 658/* lock the page(s) of a TB in the correct acquisition order */
 659static inline void page_lock_tb(const TranslationBlock *tb)
 660{
 661    page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
 662}
 663
 664static inline void page_unlock_tb(const TranslationBlock *tb)
 665{
 666    PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
 667
 668    page_unlock(p1);
 669    if (unlikely(tb->page_addr[1] != -1)) {
 670        PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
 671
 672        if (p2 != p1) {
 673            page_unlock(p2);
 674        }
 675    }
 676}
 677
 678static inline struct page_entry *
 679page_entry_new(PageDesc *pd, tb_page_addr_t index)
 680{
 681    struct page_entry *pe = g_malloc(sizeof(*pe));
 682
 683    pe->index = index;
 684    pe->pd = pd;
 685    pe->locked = false;
 686    return pe;
 687}
 688
 689static void page_entry_destroy(gpointer p)
 690{
 691    struct page_entry *pe = p;
 692
 693    g_assert(pe->locked);
 694    page_unlock(pe->pd);
 695    g_free(pe);
 696}
 697
 698/* returns false on success */
 699static bool page_entry_trylock(struct page_entry *pe)
 700{
 701    bool busy;
 702
 703    busy = qemu_spin_trylock(&pe->pd->lock);
 704    if (!busy) {
 705        g_assert(!pe->locked);
 706        pe->locked = true;
 707        page_lock__debug(pe->pd);
 708    }
 709    return busy;
 710}
 711
 712static void do_page_entry_lock(struct page_entry *pe)
 713{
 714    page_lock(pe->pd);
 715    g_assert(!pe->locked);
 716    pe->locked = true;
 717}
 718
 719static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
 720{
 721    struct page_entry *pe = value;
 722
 723    do_page_entry_lock(pe);
 724    return FALSE;
 725}
 726
 727static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
 728{
 729    struct page_entry *pe = value;
 730
 731    if (pe->locked) {
 732        pe->locked = false;
 733        page_unlock(pe->pd);
 734    }
 735    return FALSE;
 736}
 737
 738/*
 739 * Trylock a page, and if successful, add the page to a collection.
 740 * Returns true ("busy") if the page could not be locked; false otherwise.
 741 */
 742static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
 743{
 744    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
 745    struct page_entry *pe;
 746    PageDesc *pd;
 747
 748    pe = g_tree_lookup(set->tree, &index);
 749    if (pe) {
 750        return false;
 751    }
 752
 753    pd = page_find(index);
 754    if (pd == NULL) {
 755        return false;
 756    }
 757
 758    pe = page_entry_new(pd, index);
 759    g_tree_insert(set->tree, &pe->index, pe);
 760
 761    /*
 762     * If this is either (1) the first insertion or (2) a page whose index
 763     * is higher than any other so far, just lock the page and move on.
 764     */
 765    if (set->max == NULL || pe->index > set->max->index) {
 766        set->max = pe;
 767        do_page_entry_lock(pe);
 768        return false;
 769    }
 770    /*
 771     * Try to acquire out-of-order lock; if busy, return busy so that we acquire
 772     * locks in order.
 773     */
 774    return page_entry_trylock(pe);
 775}
 776
 777static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
 778{
 779    tb_page_addr_t a = *(const tb_page_addr_t *)ap;
 780    tb_page_addr_t b = *(const tb_page_addr_t *)bp;
 781
 782    if (a == b) {
 783        return 0;
 784    } else if (a < b) {
 785        return -1;
 786    }
 787    return 1;
 788}
 789
 790/*
 791 * Lock a range of pages ([@start,@end[) as well as the pages of all
 792 * intersecting TBs.
 793 * Locking order: acquire locks in ascending order of page index.
 794 */
 795struct page_collection *
 796page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
 797{
 798    struct page_collection *set = g_malloc(sizeof(*set));
 799    tb_page_addr_t index;
 800    PageDesc *pd;
 801
 802    start >>= TARGET_PAGE_BITS;
 803    end   >>= TARGET_PAGE_BITS;
 804    g_assert(start <= end);
 805
 806    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
 807                                page_entry_destroy);
 808    set->max = NULL;
 809    assert_no_pages_locked();
 810
 811 retry:
 812    g_tree_foreach(set->tree, page_entry_lock, NULL);
 813
 814    for (index = start; index <= end; index++) {
 815        TranslationBlock *tb;
 816        int n;
 817
 818        pd = page_find(index);
 819        if (pd == NULL) {
 820            continue;
 821        }
 822        if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
 823            g_tree_foreach(set->tree, page_entry_unlock, NULL);
 824            goto retry;
 825        }
 826        assert_page_locked(pd);
 827        PAGE_FOR_EACH_TB(pd, tb, n) {
 828            if (page_trylock_add(set, tb->page_addr[0]) ||
 829                (tb->page_addr[1] != -1 &&
 830                 page_trylock_add(set, tb->page_addr[1]))) {
 831                /* drop all locks, and reacquire in order */
 832                g_tree_foreach(set->tree, page_entry_unlock, NULL);
 833                goto retry;
 834            }
 835        }
 836    }
 837    return set;
 838}
 839
 840void page_collection_unlock(struct page_collection *set)
 841{
 842    /* entries are unlocked and freed via page_entry_destroy */
 843    g_tree_destroy(set->tree);
 844    g_free(set);
 845}
 846
 847#endif /* !CONFIG_USER_ONLY */
 848
 849static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
 850                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
 851{
 852    PageDesc *p1, *p2;
 853    tb_page_addr_t page1;
 854    tb_page_addr_t page2;
 855
 856    assert_memory_lock();
 857    g_assert(phys1 != -1);
 858
 859    page1 = phys1 >> TARGET_PAGE_BITS;
 860    page2 = phys2 >> TARGET_PAGE_BITS;
 861
 862    p1 = page_find_alloc(page1, alloc);
 863    if (ret_p1) {
 864        *ret_p1 = p1;
 865    }
 866    if (likely(phys2 == -1)) {
 867        page_lock(p1);
 868        return;
 869    } else if (page1 == page2) {
 870        page_lock(p1);
 871        if (ret_p2) {
 872            *ret_p2 = p1;
 873        }
 874        return;
 875    }
 876    p2 = page_find_alloc(page2, alloc);
 877    if (ret_p2) {
 878        *ret_p2 = p2;
 879    }
 880    if (page1 < page2) {
 881        page_lock(p1);
 882        page_lock(p2);
 883    } else {
 884        page_lock(p2);
 885        page_lock(p1);
 886    }
 887}
 888
 889static bool tb_cmp(const void *ap, const void *bp)
 890{
 891    const TranslationBlock *a = ap;
 892    const TranslationBlock *b = bp;
 893
 894    return a->pc == b->pc &&
 895        a->cs_base == b->cs_base &&
 896        a->flags == b->flags &&
 897        (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
 898        a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
 899        a->page_addr[0] == b->page_addr[0] &&
 900        a->page_addr[1] == b->page_addr[1];
 901}
 902
 903void tb_htable_init(void)
 904{
 905    unsigned int mode = QHT_MODE_AUTO_RESIZE;
 906
 907    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
 908}
 909
 910/* call with @p->lock held */
 911static inline void invalidate_page_bitmap(PageDesc *p)
 912{
 913    assert_page_locked(p);
 914#ifdef CONFIG_SOFTMMU
 915    g_free(p->code_bitmap);
 916    p->code_bitmap = NULL;
 917    p->code_write_count = 0;
 918#endif
 919}
 920
 921/* Set to NULL all the 'first_tb' fields in all PageDescs. */
 922static void page_flush_tb_1(int level, void **lp)
 923{
 924    int i;
 925
 926    if (*lp == NULL) {
 927        return;
 928    }
 929    if (level == 0) {
 930        PageDesc *pd = *lp;
 931
 932        for (i = 0; i < V_L2_SIZE; ++i) {
 933            page_lock(&pd[i]);
 934            pd[i].first_tb = (uintptr_t)NULL;
 935            invalidate_page_bitmap(pd + i);
 936            page_unlock(&pd[i]);
 937        }
 938    } else {
 939        void **pp = *lp;
 940
 941        for (i = 0; i < V_L2_SIZE; ++i) {
 942            page_flush_tb_1(level - 1, pp + i);
 943        }
 944    }
 945}
 946
 947static void page_flush_tb(void)
 948{
 949    int i, l1_sz = v_l1_size;
 950
 951    for (i = 0; i < l1_sz; i++) {
 952        page_flush_tb_1(v_l2_levels, l1_map + i);
 953    }
 954}
 955
 956static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
 957{
 958    const TranslationBlock *tb = value;
 959    size_t *size = data;
 960
 961    *size += tb->tc.size;
 962    return false;
 963}
 964
 965/* flush all the translation blocks */
 966static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
 967{
 968    bool did_flush = false;
 969
 970    mmap_lock();
  971    /* If it has already been done at the request of another CPU,
 972     * just retry.
 973     */
 974    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
 975        goto done;
 976    }
 977    did_flush = true;
 978
 979    if (DEBUG_TB_FLUSH_GATE) {
 980        size_t nb_tbs = tcg_nb_tbs();
 981        size_t host_size = 0;
 982
 983        tcg_tb_foreach(tb_host_size_iter, &host_size);
 984        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
 985               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
 986    }
 987
 988    CPU_FOREACH(cpu) {
 989        cpu_tb_jmp_cache_clear(cpu);
 990    }
 991
 992    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
 993    page_flush_tb();
 994
 995    tcg_region_reset_all();
 996    /* XXX: flush processor icache at this point if cache flush is
 997       expensive */
 998    qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
 999
1000done:
1001    mmap_unlock();
1002    if (did_flush) {
1003        qemu_plugin_flush_cb();
1004    }
1005}
1006
1007void tb_flush(CPUState *cpu)
1008{
1009    if (tcg_enabled()) {
1010        unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
1011
1012        if (cpu_in_exclusive_context(cpu)) {
1013            do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
1014        } else {
1015            async_safe_run_on_cpu(cpu, do_tb_flush,
1016                                  RUN_ON_CPU_HOST_INT(tb_flush_count));
1017        }
1018    }
1019}
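
/*
 * Editor's note: the usual trigger for a flush is running out of space in the
 * code buffer; tb_gen_code() below does exactly this when tcg_tb_alloc()
 * fails:
 *
 *     tb_flush(cpu);
 *     mmap_unlock();
 *     cpu->exception_index = EXCP_INTERRUPT;
 *     cpu_loop_exit(cpu);      -- retranslation happens after the flush
 */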
1020
1021/*
1022 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1023 * so in order to prevent bit rot we compile them unconditionally in user-mode,
1024 * and let the optimizer get rid of them by wrapping their user-only callers
1025 * with if (DEBUG_TB_CHECK_GATE).
1026 */
1027#ifdef CONFIG_USER_ONLY
1028
1029static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
1030{
1031    TranslationBlock *tb = p;
1032    target_ulong addr = *(target_ulong *)userp;
1033
1034    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
1035        printf("ERROR invalidate: address=" TARGET_FMT_lx
1036               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
1037    }
1038}
1039
1040/* verify that all the pages have correct rights for code
1041 *
1042 * Called with mmap_lock held.
1043 */
1044static void tb_invalidate_check(target_ulong address)
1045{
1046    address &= TARGET_PAGE_MASK;
1047    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
1048}
1049
1050static void do_tb_page_check(void *p, uint32_t hash, void *userp)
1051{
1052    TranslationBlock *tb = p;
1053    int flags1, flags2;
1054
1055    flags1 = page_get_flags(tb->pc);
1056    flags2 = page_get_flags(tb->pc + tb->size - 1);
1057    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
1058        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1059               (long)tb->pc, tb->size, flags1, flags2);
1060    }
1061}
1062
1063/* verify that all the pages have correct rights for code */
1064static void tb_page_check(void)
1065{
1066    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
1067}
1068
1069#endif /* CONFIG_USER_ONLY */
1070
1071/*
1072 * user-mode: call with mmap_lock held
1073 * !user-mode: call with @pd->lock held
1074 */
1075static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
1076{
1077    TranslationBlock *tb1;
1078    uintptr_t *pprev;
1079    unsigned int n1;
1080
1081    assert_page_locked(pd);
1082    pprev = &pd->first_tb;
1083    PAGE_FOR_EACH_TB(pd, tb1, n1) {
1084        if (tb1 == tb) {
1085            *pprev = tb1->page_next[n1];
1086            return;
1087        }
1088        pprev = &tb1->page_next[n1];
1089    }
1090    g_assert_not_reached();
1091}
1092
1093/* remove @orig from its @n_orig-th jump list */
1094static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
1095{
1096    uintptr_t ptr, ptr_locked;
1097    TranslationBlock *dest;
1098    TranslationBlock *tb;
1099    uintptr_t *pprev;
1100    int n;
1101
1102    /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1103    ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
1104    dest = (TranslationBlock *)(ptr & ~1);
1105    if (dest == NULL) {
1106        return;
1107    }
1108
1109    qemu_spin_lock(&dest->jmp_lock);
1110    /*
1111     * While acquiring the lock, the jump might have been removed if the
1112     * destination TB was invalidated; check again.
1113     */
1114    ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
1115    if (ptr_locked != ptr) {
1116        qemu_spin_unlock(&dest->jmp_lock);
1117        /*
1118         * The only possibility is that the jump was unlinked via
 1119         * tb_jmp_unlink(dest). Seeing another destination here would be a bug,
1120         * because we set the LSB above.
1121         */
1122        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
1123        return;
1124    }
1125    /*
1126     * We first acquired the lock, and since the destination pointer matches,
1127     * we know for sure that @orig is in the jmp list.
1128     */
1129    pprev = &dest->jmp_list_head;
1130    TB_FOR_EACH_JMP(dest, tb, n) {
1131        if (tb == orig && n == n_orig) {
1132            *pprev = tb->jmp_list_next[n];
1133            /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1134            qemu_spin_unlock(&dest->jmp_lock);
1135            return;
1136        }
1137        pprev = &tb->jmp_list_next[n];
1138    }
1139    g_assert_not_reached();
1140}
1141
1142/* reset the jump entry 'n' of a TB so that it is not chained to
1143   another TB */
1144static inline void tb_reset_jump(TranslationBlock *tb, int n)
1145{
1146    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1147    tb_set_jmp_target(tb, n, addr);
1148}
1149
1150/* remove any jumps to the TB */
1151static inline void tb_jmp_unlink(TranslationBlock *dest)
1152{
1153    TranslationBlock *tb;
1154    int n;
1155
1156    qemu_spin_lock(&dest->jmp_lock);
1157
1158    TB_FOR_EACH_JMP(dest, tb, n) {
1159        tb_reset_jump(tb, n);
1160        qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
1161        /* No need to clear the list entry; setting the dest ptr is enough */
1162    }
1163    dest->jmp_list_head = (uintptr_t)NULL;
1164
1165    qemu_spin_unlock(&dest->jmp_lock);
1166}
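
/*
 * Editor's note: jmp_dest[] uses the same low-bit tagging as the page lists.
 * Once one of the routines above has made the stored value odd, e.g.
 *
 *     qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
 *
 * no new outgoing jump can be installed in that slot of the TB, which is what
 * the "no further jumps" comments above rely on.
 */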
1167
1168/*
1169 * In user-mode, call with mmap_lock held.
1170 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1171 * locks held.
1172 */
1173static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
1174{
1175    CPUState *cpu;
1176    PageDesc *p;
1177    uint32_t h;
1178    tb_page_addr_t phys_pc;
1179    uint32_t orig_cflags = tb_cflags(tb);
1180
1181    assert_memory_lock();
1182
1183    /* make sure no further incoming jumps will be chained to this TB */
1184    qemu_spin_lock(&tb->jmp_lock);
1185    qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1186    qemu_spin_unlock(&tb->jmp_lock);
1187
1188    /* remove the TB from the hash list */
1189    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1190    h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags,
1191                     tb->trace_vcpu_dstate);
1192    if (!qht_remove(&tb_ctx.htable, tb, h)) {
1193        return;
1194    }
1195
1196    /* remove the TB from the page list */
1197    if (rm_from_page_list) {
1198        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1199        tb_page_remove(p, tb);
1200        invalidate_page_bitmap(p);
1201        if (tb->page_addr[1] != -1) {
1202            p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1203            tb_page_remove(p, tb);
1204            invalidate_page_bitmap(p);
1205        }
1206    }
1207
 1208    /* remove the TB from each CPU's tb_jmp_cache */
1209    h = tb_jmp_cache_hash_func(tb->pc);
1210    CPU_FOREACH(cpu) {
1211        if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1212            qatomic_set(&cpu->tb_jmp_cache[h], NULL);
1213        }
1214    }
1215
1216    /* suppress this TB from the two jump lists */
1217    tb_remove_from_jmp_list(tb, 0);
1218    tb_remove_from_jmp_list(tb, 1);
1219
1220    /* suppress any remaining jumps to this TB */
1221    tb_jmp_unlink(tb);
1222
1223    qatomic_set(&tb_ctx.tb_phys_invalidate_count,
1224                tb_ctx.tb_phys_invalidate_count + 1);
1225}
1226
1227static void tb_phys_invalidate__locked(TranslationBlock *tb)
1228{
1229    qemu_thread_jit_write();
1230    do_tb_phys_invalidate(tb, true);
1231    qemu_thread_jit_execute();
1232}
1233
1234/* invalidate one TB
1235 *
1236 * Called with mmap_lock held in user-mode.
1237 */
1238void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1239{
1240    if (page_addr == -1 && tb->page_addr[0] != -1) {
1241        page_lock_tb(tb);
1242        do_tb_phys_invalidate(tb, true);
1243        page_unlock_tb(tb);
1244    } else {
1245        do_tb_phys_invalidate(tb, false);
1246    }
1247}
1248
1249#ifdef CONFIG_SOFTMMU
1250/* call with @p->lock held */
1251static void build_page_bitmap(PageDesc *p)
1252{
1253    int n, tb_start, tb_end;
1254    TranslationBlock *tb;
1255
1256    assert_page_locked(p);
1257    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1258
1259    PAGE_FOR_EACH_TB(p, tb, n) {
1260        /* NOTE: this is subtle as a TB may span two physical pages */
1261        if (n == 0) {
1262            /* NOTE: tb_end may be after the end of the page, but
1263               it is not a problem */
1264            tb_start = tb->pc & ~TARGET_PAGE_MASK;
1265            tb_end = tb_start + tb->size;
1266            if (tb_end > TARGET_PAGE_SIZE) {
1267                tb_end = TARGET_PAGE_SIZE;
 1268            }
1269        } else {
1270            tb_start = 0;
1271            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1272        }
1273        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1274    }
1275}
1276#endif
1277
 1278/* add the tb to the target page and protect it if necessary
1279 *
1280 * Called with mmap_lock held for user-mode emulation.
1281 * Called with @p->lock held in !user-mode.
1282 */
1283static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
1284                               unsigned int n, tb_page_addr_t page_addr)
1285{
1286#ifndef CONFIG_USER_ONLY
1287    bool page_already_protected;
1288#endif
1289
1290    assert_page_locked(p);
1291
1292    tb->page_addr[n] = page_addr;
1293    tb->page_next[n] = p->first_tb;
1294#ifndef CONFIG_USER_ONLY
1295    page_already_protected = p->first_tb != (uintptr_t)NULL;
1296#endif
1297    p->first_tb = (uintptr_t)tb | n;
1298    invalidate_page_bitmap(p);
1299
1300#if defined(CONFIG_USER_ONLY)
1301    if (p->flags & PAGE_WRITE) {
1302        target_ulong addr;
1303        PageDesc *p2;
1304        int prot;
1305
 1306        /* force the host page as non-writable (writes will have a
1307           page fault + mprotect overhead) */
1308        page_addr &= qemu_host_page_mask;
1309        prot = 0;
1310        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1311            addr += TARGET_PAGE_SIZE) {
1312
1313            p2 = page_find(addr >> TARGET_PAGE_BITS);
1314            if (!p2) {
1315                continue;
1316            }
1317            prot |= p2->flags;
1318            p2->flags &= ~PAGE_WRITE;
 1319        }
1320        mprotect(g2h_untagged(page_addr), qemu_host_page_size,
1321                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1322        if (DEBUG_TB_INVALIDATE_GATE) {
1323            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1324        }
1325    }
1326#else
1327    /* if some code is already present, then the pages are already
1328       protected. So we handle the case where only the first TB is
1329       allocated in a physical page */
1330    if (!page_already_protected) {
1331        tlb_protect_code(page_addr);
1332    }
1333#endif
1334}
1335
1336/*
1337 * Add a new TB and link it to the physical page tables. phys_page2 is
1338 * (-1) to indicate that only one page contains the TB.
1339 *
1340 * Called with mmap_lock held for user-mode emulation.
1341 *
 1342 * Returns a pointer to @tb, or a pointer to an existing TB that matches @tb.
1343 * Note that in !user-mode, another thread might have already added a TB
1344 * for the same block of guest code that @tb corresponds to. In that case,
1345 * the caller should discard the original @tb, and use instead the returned TB.
1346 */
1347static TranslationBlock *
1348tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1349             tb_page_addr_t phys_page2)
1350{
1351    PageDesc *p;
1352    PageDesc *p2 = NULL;
1353    void *existing_tb = NULL;
1354    uint32_t h;
1355
1356    assert_memory_lock();
1357    tcg_debug_assert(!(tb->cflags & CF_INVALID));
1358
1359    /*
 1360     * Add the TB to the page list, acquiring the pages' locks first.
1361     * We keep the locks held until after inserting the TB in the hash table,
1362     * so that if the insertion fails we know for sure that the TBs are still
1363     * in the page descriptors.
1364     * Note that inserting into the hash table first isn't an option, since
1365     * we can only insert TBs that are fully initialized.
1366     */
1367    page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
1368    tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
1369    if (p2) {
1370        tb_page_add(p2, tb, 1, phys_page2);
1371    } else {
1372        tb->page_addr[1] = -1;
1373    }
1374
1375    /* add in the hash table */
1376    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags,
1377                     tb->trace_vcpu_dstate);
1378    qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
1379
1380    /* remove TB from the page(s) if we couldn't insert it */
1381    if (unlikely(existing_tb)) {
1382        tb_page_remove(p, tb);
1383        invalidate_page_bitmap(p);
1384        if (p2) {
1385            tb_page_remove(p2, tb);
1386            invalidate_page_bitmap(p2);
1387        }
1388        tb = existing_tb;
1389    }
1390
1391    if (p2 && p2 != p) {
1392        page_unlock(p2);
1393    }
1394    page_unlock(p);
1395
1396#ifdef CONFIG_USER_ONLY
1397    if (DEBUG_TB_CHECK_GATE) {
1398        tb_page_check();
1399    }
1400#endif
1401    return tb;
1402}
1403
1404/* Called with mmap_lock held for user mode emulation.  */
1405TranslationBlock *tb_gen_code(CPUState *cpu,
1406                              target_ulong pc, target_ulong cs_base,
1407                              uint32_t flags, int cflags)
1408{
1409    CPUArchState *env = cpu->env_ptr;
1410    TranslationBlock *tb, *existing_tb;
1411    tb_page_addr_t phys_pc, phys_page2;
1412    target_ulong virt_page2;
1413    tcg_insn_unit *gen_code_buf;
1414    int gen_code_size, search_size, max_insns;
1415#ifdef CONFIG_PROFILER
1416    TCGProfile *prof = &tcg_ctx->prof;
1417    int64_t ti;
1418#endif
1419
1420    assert_memory_lock();
1421    qemu_thread_jit_write();
1422
1423    phys_pc = get_page_addr_code(env, pc);
1424
1425    if (phys_pc == -1) {
 1426        /* XILINX: allow prefetching more than 1 insn from MMIO */
 1427        /* Generate a one-shot TB (not limited to a single insn here) */
1428        cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO;
1429    }
1430
1431    max_insns = cflags & CF_COUNT_MASK;
1432    if (max_insns == 0) {
1433        max_insns = TCG_MAX_INSNS;
1434    }
1435    QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
1436
1437 buffer_overflow:
1438    tb = tcg_tb_alloc(tcg_ctx);
1439    if (unlikely(!tb)) {
1440        /* flush must be done */
1441        tb_flush(cpu);
1442        mmap_unlock();
1443        /* Make the execution loop process the flush as soon as possible.  */
1444        cpu->exception_index = EXCP_INTERRUPT;
1445        cpu_loop_exit(cpu);
1446    }
1447
1448    gen_code_buf = tcg_ctx->code_gen_ptr;
1449    tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
1450    tb->pc = pc;
1451    tb->cs_base = cs_base;
1452    tb->flags = flags;
1453    tb->cflags = cflags;
1454    tb->trace_vcpu_dstate = *cpu->trace_dstate;
1455    tcg_ctx->tb_cflags = cflags;
1456 tb_overflow:
1457
1458#ifdef CONFIG_PROFILER
1459    /* includes aborted translations because of exceptions */
1460    qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
1461    ti = profile_getclock();
1462#endif
1463
1464    gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0);
1465    if (unlikely(gen_code_size != 0)) {
1466        goto error_return;
1467    }
1468
1469    tcg_func_start(tcg_ctx);
1470
1471    tcg_ctx->cpu = env_cpu(env);
1472    gen_intermediate_code(cpu, tb, max_insns);
1473    assert(tb->size != 0);
1474    tcg_ctx->cpu = NULL;
1475    max_insns = tb->icount;
1476
1477    trace_translate_block(tb, tb->pc, tb->tc.ptr);
1478
1479    /* generate machine code */
1480    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1481    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1482    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
1483    if (TCG_TARGET_HAS_direct_jump) {
1484        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1485        tcg_ctx->tb_jmp_target_addr = NULL;
1486    } else {
1487        tcg_ctx->tb_jmp_insn_offset = NULL;
1488        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
1489    }
1490
1491#ifdef CONFIG_PROFILER
1492    qatomic_set(&prof->tb_count, prof->tb_count + 1);
1493    qatomic_set(&prof->interm_time,
1494                prof->interm_time + profile_getclock() - ti);
1495    ti = profile_getclock();
1496#endif
1497
1498    gen_code_size = tcg_gen_code(tcg_ctx, tb);
1499    if (unlikely(gen_code_size < 0)) {
1500 error_return:
1501        switch (gen_code_size) {
1502        case -1:
1503            /*
1504             * Overflow of code_gen_buffer, or the current slice of it.
1505             *
1506             * TODO: We don't need to re-do gen_intermediate_code, nor
1507             * should we re-do the tcg optimization currently hidden
1508             * inside tcg_gen_code.  All that should be required is to
1509             * flush the TBs, allocate a new TB, re-initialize it per
1510             * above, and re-do the actual code generation.
1511             */
1512            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
1513                          "Restarting code generation for "
1514                          "code_gen_buffer overflow\n");
1515            goto buffer_overflow;
1516
1517        case -2:
1518            /*
1519             * The code generated for the TranslationBlock is too large.
1520             * The maximum size allowed by the unwind info is 64k.
1521             * There may be stricter constraints from relocations
1522             * in the tcg backend.
1523             *
1524             * Try again with half as many insns as we attempted this time.
1525             * If a single insn overflows, there's a bug somewhere...
1526             */
1527            assert(max_insns > 1);
1528            max_insns /= 2;
1529            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
1530                          "Restarting code generation with "
1531                          "smaller translation block (max %d insns)\n",
1532                          max_insns);
1533            goto tb_overflow;
1534
1535        default:
1536            g_assert_not_reached();
1537        }
1538    }
1539    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1540    if (unlikely(search_size < 0)) {
1541        goto buffer_overflow;
1542    }
1543    tb->tc.size = gen_code_size;
1544
1545#ifdef CONFIG_PROFILER
1546    qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
1547    qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
1548    qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
1549    qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
1550#endif
1551
1552#ifdef DEBUG_DISAS
1553    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1554        qemu_log_in_addr_range(tb->pc)) {
1555        FILE *logfile = qemu_log_lock();
1556        int code_size, data_size;
1557        const tcg_target_ulong *rx_data_gen_ptr;
1558        size_t chunk_start;
1559        int insn = 0;
1560
1561        if (tcg_ctx->data_gen_ptr) {
1562            rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
1563            code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
1564            data_size = gen_code_size - code_size;
1565        } else {
1566            rx_data_gen_ptr = 0;
1567            code_size = gen_code_size;
1568            data_size = 0;
1569        }
1570
1571        /* Dump header and the first instruction */
1572        qemu_log("OUT: [size=%d]\n", gen_code_size);
1573        qemu_log("  -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
1574                 tcg_ctx->gen_insn_data[insn][0]);
1575        chunk_start = tcg_ctx->gen_insn_end_off[insn];
1576        log_disas(tb->tc.ptr, chunk_start);
1577
1578        /*
1579         * Dump each instruction chunk, wrapping up empty chunks into
1580         * the next instruction. The whole array is offset so the
1581         * first entry is the beginning of the 2nd instruction.
1582         */
1583        while (insn < tb->icount) {
1584            size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
1585            if (chunk_end > chunk_start) {
1586                qemu_log("  -- guest addr 0x" TARGET_FMT_lx "\n",
1587                         tcg_ctx->gen_insn_data[insn][0]);
1588                log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start);
1589                chunk_start = chunk_end;
1590            }
1591            insn++;
1592        }
1593
1594        if (chunk_start < code_size) {
1595            qemu_log("  -- tb slow paths + alignment\n");
1596            log_disas(tb->tc.ptr + chunk_start, code_size - chunk_start);
1597        }
1598
1599        /* Finally dump any data we may have after the block */
1600        if (data_size) {
1601            int i;
1602            qemu_log("  data: [size=%d]\n", data_size);
1603            for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
1604                if (sizeof(tcg_target_ulong) == 8) {
1605                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" TCG_PRIlx "\n",
1606                             (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
1607                } else if (sizeof(tcg_target_ulong) == 4) {
1608                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08" TCG_PRIlx "\n",
1609                             (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
1610                } else {
1611                    qemu_build_not_reached();
1612                }
1613            }
1614        }
1615        qemu_log("\n");
1616        qemu_log_flush();
1617        qemu_log_unlock(logfile);
1618    }
1619#endif
1620
1621    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
1622        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1623                 CODE_GEN_ALIGN));
1624
1625    /* init jump list */
1626    qemu_spin_init(&tb->jmp_lock);
1627    tb->jmp_list_head = (uintptr_t)NULL;
1628    tb->jmp_list_next[0] = (uintptr_t)NULL;
1629    tb->jmp_list_next[1] = (uintptr_t)NULL;
1630    tb->jmp_dest[0] = (uintptr_t)NULL;
1631    tb->jmp_dest[1] = (uintptr_t)NULL;
1632
1633    /* init original jump addresses which have been set during tcg_gen_code() */
1634    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1635        tb_reset_jump(tb, 0);
1636    }
1637    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1638        tb_reset_jump(tb, 1);
1639    }
1640
1641    /*
1642     * If the TB is not associated with a physical RAM page then
1643     * it must be a temporary one-insn TB, and we have nothing to do
1644     * except fill in the page_addr[] fields. Return early before
1645     * attempting to link to other TBs or add to the lookup table.
1646     */
1647    if (phys_pc == -1) {
1648        tb->page_addr[0] = tb->page_addr[1] = -1;
1649        return tb;
1650    }
1651
1652    /*
1653     * Insert TB into the corresponding region tree before publishing it
 1654     * through QHT. Otherwise a rewind happening inside the TB might fail
 1655     * to look the TB up by its host PC.
1656     */
1657    tcg_tb_insert(tb);
1658
1659    /* check next page if needed */
1660    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1661    phys_page2 = -1;
1662    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1663        phys_page2 = get_page_addr_code(env, virt_page2);
1664    }
1665    /*
1666     * No explicit memory barrier is required -- tb_link_page() makes the
1667     * TB visible in a consistent state.
1668     */
1669    existing_tb = tb_link_page(tb, phys_pc, phys_page2);
1670    /* if the TB already exists, discard what we just translated */
1671    if (unlikely(existing_tb != tb)) {
1672        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
1673
1674        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
1675        qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
1676        tcg_tb_remove(tb);
1677        return existing_tb;
1678    }
1679    return tb;
1680}
1681
1682/*
1683 * @p must be non-NULL.
1684 * user-mode: call with mmap_lock held.
1685 * !user-mode: call with all @pages locked.
1686 */
1687static void
1688tb_invalidate_phys_page_range__locked(struct page_collection *pages,
1689                                      PageDesc *p, tb_page_addr_t start,
1690                                      tb_page_addr_t end,
1691                                      uintptr_t retaddr)
1692{
1693    TranslationBlock *tb;
1694    tb_page_addr_t tb_start, tb_end;
1695    int n;
1696#ifdef TARGET_HAS_PRECISE_SMC
1697    CPUState *cpu = current_cpu;
1698    CPUArchState *env = NULL;
1699    bool current_tb_not_found = retaddr != 0;
1700    bool current_tb_modified = false;
1701    TranslationBlock *current_tb = NULL;
1702    target_ulong current_pc = 0;
1703    target_ulong current_cs_base = 0;
1704    uint32_t current_flags = 0;
1705#endif /* TARGET_HAS_PRECISE_SMC */
1706
1707    assert_page_locked(p);
1708
1709#if defined(TARGET_HAS_PRECISE_SMC)
1710    if (cpu != NULL) {
1711        env = cpu->env_ptr;
1712    }
1713#endif
1714
1715    /* we remove all the TBs in the range [start, end[ */
1716    /* XXX: see if in some cases it could be faster to invalidate all
1717       the code */
1718    PAGE_FOR_EACH_TB(p, tb, n) {
1719        assert_page_locked(p);
1720        /* NOTE: this is subtle as a TB may span two physical pages */
1721        if (n == 0) {
1722            /* NOTE: tb_end may be after the end of the page, but
1723               it is not a problem */
1724            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1725            tb_end = tb_start + tb->size;
1726        } else {
1727            tb_start = tb->page_addr[1];
1728            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1729        }
1730        if (!(tb_end <= start || tb_start >= end)) {
1731#ifdef TARGET_HAS_PRECISE_SMC
1732            if (current_tb_not_found) {
1733                current_tb_not_found = false;
1734                /* now we have a real cpu fault */
1735                current_tb = tcg_tb_lookup(retaddr);
1736            }
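                /*
                 * If the current TB already contains a single insn
                 * (CF_COUNT_MASK == 1), this store is its last action and
                 * forcing a one-insn retranslation would gain nothing.
                 */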
1737            if (current_tb == tb &&
1738                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
1739                /*
1740                 * If we are modifying the current TB, we must stop
1741                 * its execution. We could be more precise by checking
1742                 * that the modification is after the current PC, but it
1743                 * would require a specialized function to partially
1744                 * restore the CPU state.
1745                 */
1746                current_tb_modified = true;
1747                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
1748                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1749                                     &current_flags);
1750            }
1751#endif /* TARGET_HAS_PRECISE_SMC */
1752            tb_phys_invalidate__locked(tb);
1753        }
1754    }
1755#if !defined(CONFIG_USER_ONLY)
1756    /* if no code remaining, no need to continue to use slow writes */
1757    if (!p->first_tb) {
1758        invalidate_page_bitmap(p);
1759        tlb_unprotect_code(start);
1760    }
1761#endif
1762#ifdef TARGET_HAS_PRECISE_SMC
1763    if (current_tb_modified) {
1764        page_collection_unlock(pages);
1765        /* Force execution of one insn next time.  */
1766        cpu->cflags_next_tb = 1 | curr_cflags(cpu);
1767        mmap_unlock();
1768        cpu_loop_exit_noexc(cpu);
1769    }
1770#endif
1771}
1772
1773/*
1774 * Invalidate all TBs which intersect with the target physical address range
1775 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1776 * This variant passes retaddr == 0 down to the locked helper, i.e. it is not
1777 * called from a real cpu write access, so the current TB is not forced to
1778 * exit even if it is the one being modified.
1779 *
1780 * Called with mmap_lock held for user-mode emulation.
1781 */
1782void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
1783{
1784    struct page_collection *pages;
1785    PageDesc *p;
1786
1787    assert_memory_lock();
1788
1789    p = page_find(start >> TARGET_PAGE_BITS);
1790    if (p == NULL) {
1791        return;
1792    }
1793    pages = page_collection_lock(start, end);
1794    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
1795    page_collection_unlock(pages);
1796}
1797
1798/*
1799 * Invalidate all TBs which intersect with the target physical address range
1800 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1801 * As with tb_invalidate_phys_page_range(), retaddr == 0 is passed down, so
1802 * the current TB is not forced to exit even if it is the one being
1803 * modified.
1804 *
1805 * Called with mmap_lock held for user-mode emulation.
1806 */
1807#ifdef CONFIG_SOFTMMU
1808void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
1809#else
1810void tb_invalidate_phys_range(target_ulong start, target_ulong end)
1811#endif
1812{
1813    struct page_collection *pages;
1814    tb_page_addr_t next;
1815
1816    assert_memory_lock();
1817
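        /*
         * The range may span several pages: lock them all up front, then
         * invalidate page by page, clipping each step to the page boundary
         * (or to @end on the final page).
         */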
1818    pages = page_collection_lock(start, end);
1819    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1820         start < end;
1821         start = next, next += TARGET_PAGE_SIZE) {
1822        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
1823        tb_page_addr_t bound = MIN(next, end);
1824
1825        if (pd == NULL) {
1826            continue;
1827        }
1828        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
1829    }
1830    page_collection_unlock(pages);
1831}
1832
1833#ifdef CONFIG_SOFTMMU
1834/* len must be <= 8 and start must be a multiple of len.
1835 * Called via softmmu_template.h when code areas are written to with
1836 * iothread mutex not held.
1837 *
1838 * Call with all @pages in the range [@start, @start + len[ locked.
1839 */
1840void tb_invalidate_phys_page_fast(struct page_collection *pages,
1841                                  tb_page_addr_t start, int len,
1842                                  uintptr_t retaddr)
1843{
1844    PageDesc *p;
1845
1846    assert_memory_lock();
1847
1848    p = page_find(start >> TARGET_PAGE_BITS);
1849    if (!p) {
1850        return;
1851    }
1852
1853    assert_page_locked(p);
1854    if (!p->code_bitmap &&
1855        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1856        build_page_bitmap(p);
1857    }
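        /*
         * Fast path: with the bitmap in place, a full invalidation pass is
         * only needed if one of the @len bits covering [start, start + len[
         * is set, i.e. if translated code actually overlaps the bytes being
         * written.
         */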
1858    if (p->code_bitmap) {
1859        unsigned int nr;
1860        unsigned long b;
1861
1862        nr = start & ~TARGET_PAGE_MASK;
1863        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
1864        if (b & ((1 << len) - 1)) {
1865            goto do_invalidate;
1866        }
1867    } else {
1868    do_invalidate:
1869        tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
1870                                              retaddr);
1871    }
1872}
1873#else
1874/* Called with mmap_lock held. If pc is not 0 then it indicates the
1875 * host PC of the faulting store instruction that caused this invalidate.
1876 * Returns true if the caller needs to abort execution of the current
1877 * TB (because it was modified by this store and the guest CPU has
1878 * precise-SMC semantics).
1879 */
1880static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
1881{
1882    TranslationBlock *tb;
1883    PageDesc *p;
1884    int n;
1885#ifdef TARGET_HAS_PRECISE_SMC
1886    TranslationBlock *current_tb = NULL;
1887    CPUState *cpu = current_cpu;
1888    CPUArchState *env = NULL;
1889    int current_tb_modified = 0;
1890    target_ulong current_pc = 0;
1891    target_ulong current_cs_base = 0;
1892    uint32_t current_flags = 0;
1893#endif
1894
1895    assert_memory_lock();
1896
1897    addr &= TARGET_PAGE_MASK;
1898    p = page_find(addr >> TARGET_PAGE_BITS);
1899    if (!p) {
1900        return false;
1901    }
1902
1903#ifdef TARGET_HAS_PRECISE_SMC
1904    if (p->first_tb && pc != 0) {
1905        current_tb = tcg_tb_lookup(pc);
1906    }
1907    if (cpu != NULL) {
1908        env = cpu->env_ptr;
1909    }
1910#endif
1911    assert_page_locked(p);
1912    PAGE_FOR_EACH_TB(p, tb, n) {
1913#ifdef TARGET_HAS_PRECISE_SMC
1914        if (current_tb == tb &&
1915            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
1916            /* If we are modifying the current TB, we must stop
1917               its execution. We could be more precise by checking
1918               that the modification is after the current PC, but it
1919               would require a specialized function to partially
1920               restore the CPU state. */
1921
1922            current_tb_modified = 1;
1923            cpu_restore_state_from_tb(cpu, current_tb, pc, true);
1924            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1925                                 &current_flags);
1926        }
1927#endif /* TARGET_HAS_PRECISE_SMC */
1928        tb_phys_invalidate(tb, addr);
1929    }
1930    p->first_tb = (uintptr_t)NULL;
1931#ifdef TARGET_HAS_PRECISE_SMC
1932    if (current_tb_modified) {
1933        /* Force execution of one insn next time.  */
1934        cpu->cflags_next_tb = 1 | curr_cflags(cpu);
1935        return true;
1936    }
1937#endif
1938
1939    return false;
1940}
1941#endif
1942
1943/* user-mode: call with mmap_lock held */
1944void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
1945{
1946    TranslationBlock *tb;
1947
1948    assert_memory_lock();
1949
1950    tb = tcg_tb_lookup(retaddr);
1951    if (tb) {
1952        /* We can use retranslation to find the PC.  */
1953        cpu_restore_state_from_tb(cpu, tb, retaddr, true);
1954        tb_phys_invalidate(tb, -1);
1955    } else {
1956        /* The exception probably happened in a helper.  The CPU state should
1957           have been saved before calling it. Fetch the PC from there.  */
1958        CPUArchState *env = cpu->env_ptr;
1959        target_ulong pc, cs_base;
1960        tb_page_addr_t addr;
1961        uint32_t flags;
1962
1963        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1964        addr = get_page_addr_code(env, pc);
1965        if (addr != -1) {
1966            tb_invalidate_phys_range(addr, addr + 1);
1967        }
1968    }
1969}
1970
1971#ifndef CONFIG_USER_ONLY
1972/*
1973 * In deterministic execution mode, instructions doing device I/Os
1974 * must be at the end of the TB.
1975 *
1976 * Called by softmmu_template.h, with iothread mutex not held.
1977 */
1978void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
1979{
1980    TranslationBlock *tb;
1981    CPUClass *cc;
1982    uint32_t n;
1983
1984    tb = tcg_tb_lookup(retaddr);
1985    if (!tb) {
1986        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
1987                  (void *)retaddr);
1988    }
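        /*
         * Rewind the CPU state to the start of the instruction that did the
         * I/O, so it can be re-executed under the restricted cflags set up
         * below.
         */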
1989    cpu_restore_state_from_tb(cpu, tb, retaddr, true);
1990
1991    /*
1992     * Some guests must re-execute the branch when re-executing a delay
1993     * slot instruction.  When this is the case, adjust icount and N
1994     * to account for the re-execution of the branch.
1995     */
1996    n = 1;
1997    cc = CPU_GET_CLASS(cpu);
1998    if (cc->tcg_ops->io_recompile_replay_branch &&
1999        cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
2000        cpu_neg(cpu)->icount_decr.u16.low++;
2001        n = 2;
2002    }
2003
2004    /*
2005     * Exit the loop and potentially generate a new TB executing just the I/O
2006     * insns: n (in CF_COUNT_MASK) limits it to 1 or 2 insns, with I/O allowed
2007     * on the last one (CF_LAST_IO). Instrumentation is limited to memory ops
2008     * (which execute after completion) so we don't double instrument the insn.
2009     */
2010    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
2011
2012    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
2013                           "cpu_io_recompile: rewound execution of TB to "
2014                           TARGET_FMT_lx "\n", tb->pc);
2015
2016    cpu_loop_exit_noexc(cpu);
2017}
2018
2019static void print_qht_statistics(struct qht_stats hst)
2020{
2021    uint32_t hgram_opts;
2022    size_t hgram_bins;
2023    char *hgram;
2024
2025    if (!hst.head_buckets) {
2026        return;
2027    }
2028    qemu_printf("TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
2029                hst.used_head_buckets, hst.head_buckets,
2030                (double)hst.used_head_buckets / hst.head_buckets * 100);
2031
2032    hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
2033    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
2034    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2035        hgram_opts |= QDIST_PR_NODECIMAL;
2036    }
2037    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
2038    qemu_printf("TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
2039                qdist_avg(&hst.occupancy) * 100, hgram);
2040    g_free(hgram);
2041
2042    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2043    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2044    if (hgram_bins > 10) {
2045        hgram_bins = 10;
2046    } else {
2047        hgram_bins = 0;
2048        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2049    }
2050    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
2051    qemu_printf("TB hash avg chain   %0.3f buckets. Histogram: %s\n",
2052                qdist_avg(&hst.chain), hgram);
2053    g_free(hgram);
2054}
2055
2056struct tb_tree_stats {
2057    size_t nb_tbs;
2058    size_t host_size;
2059    size_t target_size;
2060    size_t max_target_size;
2061    size_t direct_jmp_count;
2062    size_t direct_jmp2_count;
2063    size_t cross_page;
2064};
2065
2066static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2067{
2068    const TranslationBlock *tb = value;
2069    struct tb_tree_stats *tst = data;
2070
2071    tst->nb_tbs++;
2072    tst->host_size += tb->tc.size;
2073    tst->target_size += tb->size;
2074    if (tb->size > tst->max_target_size) {
2075        tst->max_target_size = tb->size;
2076    }
2077    if (tb->page_addr[1] != -1) {
2078        tst->cross_page++;
2079    }
2080    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2081        tst->direct_jmp_count++;
2082        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2083            tst->direct_jmp2_count++;
2084        }
2085    }
2086    return false;
2087}
2088
2089void dump_exec_info(void)
2090{
2091    struct tb_tree_stats tst = {};
2092    struct qht_stats hst;
2093    size_t nb_tbs, flush_full, flush_part, flush_elide;
2094
2095    tcg_tb_foreach(tb_tree_stats_iter, &tst);
2096    nb_tbs = tst.nb_tbs;
2097    /* XXX: avoid using doubles ? */
2098    qemu_printf("Translation buffer state:\n");
2099    /*
2100     * Report total code size including the padding and TB structs;
2101     * otherwise users might think "-accel tcg,tb-size" is not honoured.
2102     * For avg host size we use the precise numbers from tb_tree_stats though.
2103     */
2104    qemu_printf("gen code size       %zu/%zu\n",
2105                tcg_code_size(), tcg_code_capacity());
2106    qemu_printf("TB count            %zu\n", nb_tbs);
2107    qemu_printf("TB avg target size  %zu max=%zu bytes\n",
2108                nb_tbs ? tst.target_size / nb_tbs : 0,
2109                tst.max_target_size);
2110    qemu_printf("TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
2111                nb_tbs ? tst.host_size / nb_tbs : 0,
2112                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
2113    qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
2114                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
2115    qemu_printf("direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
2116                tst.direct_jmp_count,
2117                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
2118                tst.direct_jmp2_count,
2119                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
2120
2121    qht_statistics_init(&tb_ctx.htable, &hst);
2122    print_qht_statistics(hst);
2123    qht_statistics_destroy(&hst);
2124
2125    qemu_printf("\nStatistics:\n");
2126    qemu_printf("TB flush count      %u\n",
2127                qatomic_read(&tb_ctx.tb_flush_count));
2128    qemu_printf("TB invalidate count %u\n",
2129                qatomic_read(&tb_ctx.tb_phys_invalidate_count));
2130
2131    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
2132    qemu_printf("TLB full flushes    %zu\n", flush_full);
2133    qemu_printf("TLB partial flushes %zu\n", flush_part);
2134    qemu_printf("TLB elided flushes  %zu\n", flush_elide);
2135    tcg_dump_info();
2136}
2137
2138void dump_opcount_info(void)
2139{
2140    tcg_dump_op_count();
2141}
2142
2143#else /* CONFIG_USER_ONLY */
2144
2145void cpu_interrupt(CPUState *cpu, int mask)
2146{
2147    g_assert(qemu_mutex_iothread_locked());
2148    cpu->interrupt_request |= mask;
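        /*
         * Make icount_decr.u32 negative: every TB tests it on entry, so the
         * CPU will drop back to the main loop and service the interrupt at
         * the next TB boundary.
         */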
2149    qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
2150}
2151
2152/*
2153 * Walks guest process memory "regions" one by one
2154 * and calls callback function 'fn' for each region.
2155 */
2156struct walk_memory_regions_data {
2157    walk_memory_regions_fn fn;
2158    void *priv;
2159    target_ulong start;
2160    int prot;
2161};
2162
2163static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2164                                   target_ulong end, int new_prot)
2165{
2166    if (data->start != -1u) {
2167        int rc = data->fn(data->priv, data->start, end, data->prot);
2168        if (rc != 0) {
2169            return rc;
2170        }
2171    }
2172
2173    data->start = (new_prot ? end : -1u);
2174    data->prot = new_prot;
2175
2176    return 0;
2177}
2178
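    /*
     * Recursively walk one sub-tree of the l1_map radix tree.  At level 0
     * the node is an array of PageDesc whose flags are compared against the
     * currently open region; at higher levels it is an array of pointers to
     * the next level down.  Contiguous pages with identical protection are
     * merged into a single region by walk_memory_regions_end().
     */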
2179static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2180                                 target_ulong base, int level, void **lp)
2181{
2182    target_ulong pa;
2183    int i, rc;
2184
2185    if (*lp == NULL) {
2186        return walk_memory_regions_end(data, base, 0);
2187    }
2188
2189    if (level == 0) {
2190        PageDesc *pd = *lp;
2191
2192        for (i = 0; i < V_L2_SIZE; ++i) {
2193            int prot = pd[i].flags;
2194
2195            pa = base | (i << TARGET_PAGE_BITS);
2196            if (prot != data->prot) {
2197                rc = walk_memory_regions_end(data, pa, prot);
2198                if (rc != 0) {
2199                    return rc;
2200                }
2201            }
2202        }
2203    } else {
2204        void **pp = *lp;
2205
2206        for (i = 0; i < V_L2_SIZE; ++i) {
2207            pa = base | ((target_ulong)i <<
2208                (TARGET_PAGE_BITS + V_L2_BITS * level));
2209            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2210            if (rc != 0) {
2211                return rc;
2212            }
2213        }
2214    }
2215
2216    return 0;
2217}
2218
2219int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2220{
2221    struct walk_memory_regions_data data;
2222    uintptr_t i, l1_sz = v_l1_size;
2223
2224    data.fn = fn;
2225    data.priv = priv;
2226    data.start = -1u;
2227    data.prot = 0;
2228
2229    for (i = 0; i < l1_sz; i++) {
2230        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2231        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2232        if (rc != 0) {
2233            return rc;
2234        }
2235    }
2236
2237    return walk_memory_regions_end(&data, 0, 0);
2238}
2239
2240static int dump_region(void *priv, target_ulong start,
2241    target_ulong end, unsigned long prot)
2242{
2243    FILE *f = (FILE *)priv;
2244
2245    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2246        " "TARGET_FMT_lx" %c%c%c\n",
2247        start, end, end - start,
2248        ((prot & PAGE_READ) ? 'r' : '-'),
2249        ((prot & PAGE_WRITE) ? 'w' : '-'),
2250        ((prot & PAGE_EXEC) ? 'x' : '-'));
2251
2252    return 0;
2253}
2254
2255/* dump memory mappings */
2256void page_dump(FILE *f)
2257{
2258    const int length = sizeof(target_ulong) * 2;
2259    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2260            length, "start", length, "end", length, "size", "prot");
2261    walk_memory_regions(f, dump_region);
2262}
2263
2264int page_get_flags(target_ulong address)
2265{
2266    PageDesc *p;
2267
2268    p = page_find(address >> TARGET_PAGE_BITS);
2269    if (!p) {
2270        return 0;
2271    }
2272    return p->flags;
2273}
2274
2275/* Modify the flags of a page and invalidate the code if necessary.
2276   PAGE_WRITE_ORG is set automatically alongside PAGE_WRITE, so that
2277   page_unprotect() can restore writability later.  Call with mmap_lock held.  */
2278void page_set_flags(target_ulong start, target_ulong end, int flags)
2279{
2280    target_ulong addr, len;
2281    bool reset_target_data;
2282
2283    /* This function should never be called with addresses outside the
2284       guest address space.  If this assert fires, it probably indicates
2285       a missing call to h2g_valid.  */
2286    assert(end - 1 <= GUEST_ADDR_MAX);
2287    assert(start < end);
2288    /* Only set PAGE_ANON with new mappings. */
2289    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
2290    assert_memory_lock();
2291
2292    start = start & TARGET_PAGE_MASK;
2293    end = TARGET_PAGE_ALIGN(end);
2294
2295    if (flags & PAGE_WRITE) {
2296        flags |= PAGE_WRITE_ORG;
2297    }
2298    reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
2299    flags &= ~PAGE_RESET;
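        /* A brand-new or reset mapping discards any per-page target data. */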
2300
2301    for (addr = start, len = end - start;
2302         len != 0;
2303         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2304        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2305
2306        /* If the write protection bit is set, then we invalidate
2307           the code inside.  */
2308        if (!(p->flags & PAGE_WRITE) &&
2309            (flags & PAGE_WRITE) &&
2310            p->first_tb) {
2311            tb_invalidate_phys_page(addr, 0);
2312        }
2313        if (reset_target_data) {
2314            g_free(p->target_data);
2315            p->target_data = NULL;
2316            p->flags = flags;
2317        } else {
2318            /* Using mprotect on a page does not change MAP_ANON. */
2319            p->flags = (p->flags & PAGE_ANON) | flags;
2320        }
2321    }
2322}
2323
2324void *page_get_target_data(target_ulong address)
2325{
2326    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
2327    return p ? p->target_data : NULL;
2328}
2329
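    /*
     * Lazily allocate per-page target-specific data; targets use this to
     * attach metadata to guest pages (e.g. MTE allocation tags on aarch64).
     * Only pages marked PAGE_VALID get storage.
     */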
2330void *page_alloc_target_data(target_ulong address, size_t size)
2331{
2332    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
2333    void *ret = NULL;
2334
2335    if (p->flags & PAGE_VALID) {
2336        ret = p->target_data;
2337        if (!ret) {
2338            p->target_data = ret = g_malloc0(size);
2339        }
2340    }
2341    return ret;
2342}
2343
2344int page_check_range(target_ulong start, target_ulong len, int flags)
2345{
2346    PageDesc *p;
2347    target_ulong end;
2348    target_ulong addr;
2349
2350    /* This function should never be called with addresses outside the
2351       guest address space.  If this assert fires, it probably indicates
2352       a missing call to h2g_valid.  */
2353    if (TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS) {
2354        assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2355    }
2356
2357    if (len == 0) {
2358        return 0;
2359    }
2360    if (start + len - 1 < start) {
2361        /* We've wrapped around.  */
2362        return -1;
2363    }
2364
2365    /* must do this before we lose bits in the next step */
2366    end = TARGET_PAGE_ALIGN(start + len);
2367    start = start & TARGET_PAGE_MASK;
2368
2369    for (addr = start, len = end - start;
2370         len != 0;
2371         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2372        p = page_find(addr >> TARGET_PAGE_BITS);
2373        if (!p) {
2374            return -1;
2375        }
2376        if (!(p->flags & PAGE_VALID)) {
2377            return -1;
2378        }
2379
2380        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2381            return -1;
2382        }
2383        if (flags & PAGE_WRITE) {
2384            if (!(p->flags & PAGE_WRITE_ORG)) {
2385                return -1;
2386            }
2387            /* unprotect the page if it was put read-only because it
2388               contains translated code */
2389            if (!(p->flags & PAGE_WRITE)) {
2390                if (!page_unprotect(addr, 0)) {
2391                    return -1;
2392                }
2393            }
2394        }
2395    }
2396    return 0;
2397}
2398
2399/* called from signal handler: invalidate the code and unprotect the
2400 * page. Return 0 if the fault was not handled, 1 if it was handled,
2401 * and 2 if it was handled but the caller must cause the TB to be
2402 * immediately exited. (We can only return 2 if the 'pc' argument is
2403 * non-zero.)
2404 */
2405int page_unprotect(target_ulong address, uintptr_t pc)
2406{
2407    unsigned int prot;
2408    bool current_tb_invalidated;
2409    PageDesc *p;
2410    target_ulong host_start, host_end, addr;
2411
2412    /* Technically this isn't safe inside a signal handler.  However we
2413       know this only ever happens in a synchronous SEGV handler, so in
2414       practice it seems to be ok.  */
2415    mmap_lock();
2416
2417    p = page_find(address >> TARGET_PAGE_BITS);
2418    if (!p) {
2419        mmap_unlock();
2420        return 0;
2421    }
2422
2423    /* if the page was really writable, then we change its
2424       protection back to writable */
2425    if (p->flags & PAGE_WRITE_ORG) {
2426        current_tb_invalidated = false;
2427        if (p->flags & PAGE_WRITE) {
2428            /* If the page is actually marked WRITE then assume this is because
2429             * this thread raced with another one which got here first and
2430             * set the page to PAGE_WRITE and did the TB invalidate for us.
2431             */
2432#ifdef TARGET_HAS_PRECISE_SMC
2433            TranslationBlock *current_tb = tcg_tb_lookup(pc);
2434            if (current_tb) {
2435                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
2436            }
2437#endif
2438        } else {
2439            host_start = address & qemu_host_page_mask;
2440            host_end = host_start + qemu_host_page_size;
2441
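                /*
                 * Guest write protection is applied at host-page granularity,
                 * so every target page sharing this host page must be made
                 * writable again and have its TBs invalidated; @prot
                 * accumulates the union of their flags for the final
                 * mprotect().
                 */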
2442            prot = 0;
2443            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2444                p = page_find(addr >> TARGET_PAGE_BITS);
2445                p->flags |= PAGE_WRITE;
2446                prot |= p->flags;
2447
2448                /* and since the content will be modified, we must invalidate
2449                   the corresponding translated code. */
2450                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2451#ifdef CONFIG_USER_ONLY
2452                if (DEBUG_TB_CHECK_GATE) {
2453                    tb_invalidate_check(addr);
2454                }
2455#endif
2456            }
2457            mprotect((void *)g2h_untagged(host_start), qemu_host_page_size,
2458                     prot & PAGE_BITS);
2459        }
2460        mmap_unlock();
2461        /* If current TB was invalidated return to main loop */
2462        return current_tb_invalidated ? 2 : 1;
2463    }
2464    mmap_unlock();
2465    return 0;
2466}
2467#endif /* CONFIG_USER_ONLY */
2468
2469/* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
2470void tcg_flush_softmmu_tlb(CPUState *cs)
2471{
2472#ifdef CONFIG_SOFTMMU
2473    tlb_flush(cs);
2474#endif
2475}
2476