qemu/accel/tcg/translate-all.c
   1/*
   2 *  Host code generation
   3 *
   4 *  Copyright (c) 2003 Fabrice Bellard
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2.1 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19
  20#include "qemu/osdep.h"
  21#include "qemu/units.h"
  22#include "qemu-common.h"
  23
  24#define NO_CPU_IO_DEFS
  25#include "cpu.h"
  26#include "trace.h"
  27#include "disas/disas.h"
  28#include "exec/exec-all.h"
  29#include "tcg/tcg.h"
  30#if defined(CONFIG_USER_ONLY)
  31#include "qemu.h"
  32#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
  33#include <sys/param.h>
  34#if __FreeBSD_version >= 700104
  35#define HAVE_KINFO_GETVMMAP
  36#define sigqueue sigqueue_freebsd  /* avoid redefinition */
  37#include <sys/proc.h>
  38#include <machine/profile.h>
  39#define _KERNEL
  40#include <sys/user.h>
  41#undef _KERNEL
  42#undef sigqueue
  43#include <libutil.h>
  44#endif
  45#endif
  46#else
  47#include "exec/ram_addr.h"
  48#endif
  49
  50#include "exec/cputlb.h"
  51#include "exec/tb-hash.h"
  52#include "translate-all.h"
  53#include "qemu/bitmap.h"
  54#include "qemu/error-report.h"
  55#include "qemu/qemu-print.h"
  56#include "qemu/timer.h"
  57#include "qemu/main-loop.h"
  58#include "exec/log.h"
  59#include "sysemu/cpus.h"
  60#include "sysemu/tcg.h"
  61
  62/* #define DEBUG_TB_INVALIDATE */
  63/* #define DEBUG_TB_FLUSH */
  64/* make various TB consistency checks */
  65/* #define DEBUG_TB_CHECK */
  66
  67#ifdef DEBUG_TB_INVALIDATE
  68#define DEBUG_TB_INVALIDATE_GATE 1
  69#else
  70#define DEBUG_TB_INVALIDATE_GATE 0
  71#endif
  72
  73#ifdef DEBUG_TB_FLUSH
  74#define DEBUG_TB_FLUSH_GATE 1
  75#else
  76#define DEBUG_TB_FLUSH_GATE 0
  77#endif
  78
  79#if !defined(CONFIG_USER_ONLY)
  80/* TB consistency checks only implemented for usermode emulation.  */
  81#undef DEBUG_TB_CHECK
  82#endif
  83
  84#ifdef DEBUG_TB_CHECK
  85#define DEBUG_TB_CHECK_GATE 1
  86#else
  87#define DEBUG_TB_CHECK_GATE 0
  88#endif
  89
   90/* Access to the various translation structures needs to be serialised via locks
   91 * for consistency.
   92 * In user-mode emulation, access to the memory-related structures is protected
  93 * with mmap_lock.
  94 * In !user-mode we use per-page locks.
  95 */
  96#ifdef CONFIG_SOFTMMU
  97#define assert_memory_lock()
  98#else
  99#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
 100#endif
 101
 102#define SMC_BITMAP_USE_THRESHOLD 10
 103
 104typedef struct PageDesc {
 105    /* list of TBs intersecting this ram page */
 106    uintptr_t first_tb;
 107#ifdef CONFIG_SOFTMMU
  108    /* in order to optimize self-modifying code, we count the number
  109       of lookups we do to a given page to decide when to use a bitmap */
 110    unsigned long *code_bitmap;
 111    unsigned int code_write_count;
 112#else
 113    unsigned long flags;
 114#endif
 115#ifndef CONFIG_USER_ONLY
 116    QemuSpin lock;
 117#endif
 118} PageDesc;
 119
 120/**
 121 * struct page_entry - page descriptor entry
 122 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 123 * @index:  page index of the page
 124 * @locked: whether the page is locked
 125 *
 126 * This struct helps us keep track of the locked state of a page, without
 127 * bloating &struct PageDesc.
 128 *
 129 * A page lock protects accesses to all fields of &struct PageDesc.
 130 *
 131 * See also: &struct page_collection.
 132 */
 133struct page_entry {
 134    PageDesc *pd;
 135    tb_page_addr_t index;
 136    bool locked;
 137};
 138
 139/**
 140 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 141 * @tree:   Binary search tree (BST) of the pages, with key == page index
 142 * @max:    Pointer to the page in @tree with the highest page index
 143 *
 144 * To avoid deadlock we lock pages in ascending order of page index.
 145 * When operating on a set of pages, we need to keep track of them so that
 146 * we can lock them in order and also unlock them later. For this we collect
 147 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 148 * @tree implementation we use does not provide an O(1) operation to obtain the
 149 * highest-ranked element, we use @max to keep track of the inserted page
 150 * with the highest index. This is valuable because if a page is not in
 151 * the tree and its index is higher than @max's, then we can lock it
 152 * without breaking the locking order rule.
 153 *
 154 * Note on naming: 'struct page_set' would be shorter, but we already have a few
 155 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
 156 *
 157 * See also: page_collection_lock().
 158 */
 159struct page_collection {
 160    GTree *tree;
 161    struct page_entry *max;
 162};
 163
 164/* list iterators for lists of tagged pointers in TranslationBlock */
 165#define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
 166    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
 167         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
 168             tb = (TranslationBlock *)((uintptr_t)tb & ~1))
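
     /*
      * Note: the list links above are tagged pointers.  The low bit of each
      * stored TranslationBlock pointer records which of the TB's (at most
      * two) pages the list belongs to, i.e. which page_next[]/jmp_list_next[]
      * slot holds the next link; the iterator strips the tag with "& ~1"
      * before dereferencing and keeps it in @n to pick the right slot.
      */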
 169
 170#define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
 171    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
 172
 173#define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
 174    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
 175
 176/* In system mode we want L1_MAP to be based on ram offsets,
 177   while in user mode we want it to be based on virtual addresses.  */
 178#if !defined(CONFIG_USER_ONLY)
 179#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
 180# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
 181#else
 182# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
 183#endif
 184#else
 185# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
 186#endif
 187
 188/* Size of the L2 (and L3, etc) page tables.  */
 189#define V_L2_BITS 10
 190#define V_L2_SIZE (1 << V_L2_BITS)
 191
 192/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
 193QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
 194                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
 195                  * BITS_PER_BYTE);
 196
 197/*
 198 * L1 Mapping properties
 199 */
 200static int v_l1_size;
 201static int v_l1_shift;
 202static int v_l2_levels;
 203
 204/* The bottom level has pointers to PageDesc, and is indexed by
 205 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 206 */
 207#define V_L1_MIN_BITS 4
 208#define V_L1_MAX_BITS (V_L2_BITS + 3)
 209#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
 210
 211static void *l1_map[V_L1_MAX_SIZE];
 212
 213/* code generation context */
 214TCGContext tcg_init_ctx;
 215__thread TCGContext *tcg_ctx;
 216TBContext tb_ctx;
 217bool parallel_cpus;
 218
 219static void page_table_config_init(void)
 220{
 221    uint32_t v_l1_bits;
 222
 223    assert(TARGET_PAGE_BITS);
 224    /* The bits remaining after N lower levels of page tables.  */
 225    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
 226    if (v_l1_bits < V_L1_MIN_BITS) {
 227        v_l1_bits += V_L2_BITS;
 228    }
 229
 230    v_l1_size = 1 << v_l1_bits;
 231    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
 232    v_l2_levels = v_l1_shift / V_L2_BITS - 1;
 233
 234    assert(v_l1_bits <= V_L1_MAX_BITS);
 235    assert(v_l1_shift % V_L2_BITS == 0);
 236    assert(v_l2_levels >= 0);
 237}
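
     /*
      * Worked example (illustrative figures, not from any particular build):
      * with L1_MAP_ADDR_SPACE_BITS == 48 and TARGET_PAGE_BITS == 12, 36 bits
      * remain to be translated; 36 % V_L2_BITS == 6 >= V_L1_MIN_BITS, so
      * v_l1_bits = 6 (v_l1_size = 64), v_l1_shift = 30 and v_l2_levels = 2:
      * a 6-bit L1 table, two 10-bit intermediate levels and a 10-bit leaf.
      */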
 238
 239void cpu_gen_init(void)
 240{
 241    tcg_context_init(&tcg_init_ctx);
 242}
 243
 244/* Encode VAL as a signed leb128 sequence at P.
 245   Return P incremented past the encoded value.  */
 246static uint8_t *encode_sleb128(uint8_t *p, target_long val)
 247{
 248    int more, byte;
 249
 250    do {
 251        byte = val & 0x7f;
 252        val >>= 7;
 253        more = !((val == 0 && (byte & 0x40) == 0)
 254                 || (val == -1 && (byte & 0x40) != 0));
 255        if (more) {
 256            byte |= 0x80;
 257        }
 258        *p++ = byte;
 259    } while (more);
 260
 261    return p;
 262}
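
     /*
      * Example (illustration only): -2 encodes as the single byte 0x7e, since
      * bit 6 doubles as the sign bit; +64 needs two bytes, 0xc0 0x00, because
      * a lone 0x40 would otherwise decode as negative.  decode_sleb128()
      * below reverses both cases exactly.
      */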
 263
 264/* Decode a signed leb128 sequence at *PP; increment *PP past the
 265   decoded value.  Return the decoded value.  */
 266static target_long decode_sleb128(uint8_t **pp)
 267{
 268    uint8_t *p = *pp;
 269    target_long val = 0;
 270    int byte, shift = 0;
 271
 272    do {
 273        byte = *p++;
 274        val |= (target_ulong)(byte & 0x7f) << shift;
 275        shift += 7;
 276    } while (byte & 0x80);
 277    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
 278        val |= -(target_ulong)1 << shift;
 279    }
 280
 281    *pp = p;
 282    return val;
 283}
 284
 285/* Encode the data collected about the instructions while compiling TB.
 286   Place the data at BLOCK, and return the number of bytes consumed.
 287
 288   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
 289   which come from the target's insn_start data, followed by a uintptr_t
 290   which comes from the host pc of the end of the code implementing the insn.
 291
 292   Each line of the table is encoded as sleb128 deltas from the previous
 293   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
 294   That is, the first column is seeded with the guest pc, the last column
 295   with the host pc, and the middle columns with zeros.  */
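
     /* For instance (hypothetical numbers): a TB of two insns at guest pcs
        0x1000 and 0x1004, whose generated code ends at host offsets 0x20 and
        0x34, would encode the rows {0, 0x20} and {+4, +0x14}, assuming a
        single insn_start word per instruction.  */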
 296
 297static int encode_search(TranslationBlock *tb, uint8_t *block)
 298{
 299    uint8_t *highwater = tcg_ctx->code_gen_highwater;
 300    uint8_t *p = block;
 301    int i, j, n;
 302
 303    for (i = 0, n = tb->icount; i < n; ++i) {
 304        target_ulong prev;
 305
 306        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
 307            if (i == 0) {
 308                prev = (j == 0 ? tb->pc : 0);
 309            } else {
 310                prev = tcg_ctx->gen_insn_data[i - 1][j];
 311            }
 312            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
 313        }
 314        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
 315        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
 316
 317        /* Test for (pending) buffer overflow.  The assumption is that any
 318           one row beginning below the high water mark cannot overrun
 319           the buffer completely.  Thus we can test for overflow after
 320           encoding a row without having to check during encoding.  */
 321        if (unlikely(p > highwater)) {
 322            return -1;
 323        }
 324    }
 325
 326    return p - block;
 327}
 328
 329/* The cpu state corresponding to 'searched_pc' is restored.
  330 * When reset_icount is true, the current TB will be interrupted and
 331 * icount should be recalculated.
 332 */
 333static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
 334                                     uintptr_t searched_pc, bool reset_icount)
 335{
 336    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
 337    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
 338    CPUArchState *env = cpu->env_ptr;
 339    uint8_t *p = tb->tc.ptr + tb->tc.size;
 340    int i, j, num_insns = tb->icount;
 341#ifdef CONFIG_PROFILER
 342    TCGProfile *prof = &tcg_ctx->prof;
 343    int64_t ti = profile_getclock();
 344#endif
 345
 346    searched_pc -= GETPC_ADJ;
 347
 348    if (searched_pc < host_pc) {
 349        return -1;
 350    }
 351
 352    /* Reconstruct the stored insn data while looking for the point at
 353       which the end of the insn exceeds the searched_pc.  */
 354    for (i = 0; i < num_insns; ++i) {
 355        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
 356            data[j] += decode_sleb128(&p);
 357        }
 358        host_pc += decode_sleb128(&p);
 359        if (host_pc > searched_pc) {
 360            goto found;
 361        }
 362    }
 363    return -1;
 364
 365 found:
 366    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
 367        assert(use_icount);
 368        /* Reset the cycle counter to the start of the block
  369           and shift it by the number of actually executed instructions */
 370        cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
 371    }
 372    restore_state_to_opc(env, tb, data);
 373
 374#ifdef CONFIG_PROFILER
 375    atomic_set(&prof->restore_time,
 376                prof->restore_time + profile_getclock() - ti);
 377    atomic_set(&prof->restore_count, prof->restore_count + 1);
 378#endif
 379    return 0;
 380}
 381
 382bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
 383{
 384    TranslationBlock *tb;
 385    bool r = false;
 386    uintptr_t check_offset;
 387
  388    /* The host_pc has to be in the region of the current code buffer. If
  389     * it is not, we will not be able to resolve it here. The two cases
  390     * where host_pc will not be correct are:
  391     *
  392     *  - fault during translation (instruction fetch)
  393     *  - fault from helper (not using GETPC() macro)
  394     *
  395     * Either way we need to return early as we can't resolve it here.
  396     *
  397     * We are using unsigned arithmetic, so if host_pc <
  398     * tcg_init_ctx.code_gen_buffer, check_offset will wrap to way
  399     * above code_gen_buffer_size.
  400     */
 401    check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;
 402
 403    if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
 404        tb = tcg_tb_lookup(host_pc);
 405        if (tb) {
 406            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
 407            if (tb_cflags(tb) & CF_NOCACHE) {
 408                /* one-shot translation, invalidate it immediately */
 409                tb_phys_invalidate(tb, -1);
 410                tcg_tb_remove(tb);
 411            }
 412            r = true;
 413        }
 414    }
 415
 416    return r;
 417}
 418
 419static void page_init(void)
 420{
 421    page_size_init();
 422    page_table_config_init();
 423
 424#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
 425    {
 426#ifdef HAVE_KINFO_GETVMMAP
 427        struct kinfo_vmentry *freep;
 428        int i, cnt;
 429
 430        freep = kinfo_getvmmap(getpid(), &cnt);
 431        if (freep) {
 432            mmap_lock();
 433            for (i = 0; i < cnt; i++) {
 434                unsigned long startaddr, endaddr;
 435
 436                startaddr = freep[i].kve_start;
 437                endaddr = freep[i].kve_end;
 438                if (h2g_valid(startaddr)) {
 439                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
 440
 441                    if (h2g_valid(endaddr)) {
 442                        endaddr = h2g(endaddr);
 443                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
 444                    } else {
 445#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
 446                        endaddr = ~0ul;
 447                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
 448#endif
 449                    }
 450                }
 451            }
 452            free(freep);
 453            mmap_unlock();
 454        }
 455#else
 456        FILE *f;
 457
 458        last_brk = (unsigned long)sbrk(0);
 459
 460        f = fopen("/compat/linux/proc/self/maps", "r");
 461        if (f) {
 462            mmap_lock();
 463
 464            do {
 465                unsigned long startaddr, endaddr;
 466                int n;
 467
 468                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
 469
 470                if (n == 2 && h2g_valid(startaddr)) {
 471                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
 472
 473                    if (h2g_valid(endaddr)) {
 474                        endaddr = h2g(endaddr);
 475                    } else {
 476                        endaddr = ~0ul;
 477                    }
 478                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
 479                }
 480            } while (!feof(f));
 481
 482            fclose(f);
 483            mmap_unlock();
 484        }
 485#endif
 486    }
 487#endif
 488}
 489
 490static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
 491{
 492    PageDesc *pd;
 493    void **lp;
 494    int i;
 495
 496    /* Level 1.  Always allocated.  */
 497    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
 498
 499    /* Level 2..N-1.  */
 500    for (i = v_l2_levels; i > 0; i--) {
 501        void **p = atomic_rcu_read(lp);
 502
 503        if (p == NULL) {
 504            void *existing;
 505
 506            if (!alloc) {
 507                return NULL;
 508            }
 509            p = g_new0(void *, V_L2_SIZE);
 510            existing = atomic_cmpxchg(lp, NULL, p);
 511            if (unlikely(existing)) {
 512                g_free(p);
 513                p = existing;
 514            }
 515        }
 516
 517        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
 518    }
 519
 520    pd = atomic_rcu_read(lp);
 521    if (pd == NULL) {
 522        void *existing;
 523
 524        if (!alloc) {
 525            return NULL;
 526        }
 527        pd = g_new0(PageDesc, V_L2_SIZE);
 528#ifndef CONFIG_USER_ONLY
 529        {
 530            int i;
 531
 532            for (i = 0; i < V_L2_SIZE; i++) {
 533                qemu_spin_init(&pd[i].lock);
 534            }
 535        }
 536#endif
 537        existing = atomic_cmpxchg(lp, NULL, pd);
 538        if (unlikely(existing)) {
 539            g_free(pd);
 540            pd = existing;
 541        }
 542    }
 543
 544    return pd + (index & (V_L2_SIZE - 1));
 545}
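
     /*
      * The walk above consumes @index from the most-significant bits down:
      * v_l1_bits select the l1_map slot, each of the v_l2_levels intermediate
      * tables consumes V_L2_BITS, and the low V_L2_BITS pick the PageDesc out
      * of the leaf array.
      */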
 546
 547static inline PageDesc *page_find(tb_page_addr_t index)
 548{
 549    return page_find_alloc(index, 0);
 550}
 551
 552static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
 553                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
 554
 555/* In user-mode page locks aren't used; mmap_lock is enough */
 556#ifdef CONFIG_USER_ONLY
 557
 558#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
 559
 560static inline void page_lock(PageDesc *pd)
 561{ }
 562
 563static inline void page_unlock(PageDesc *pd)
 564{ }
 565
 566static inline void page_lock_tb(const TranslationBlock *tb)
 567{ }
 568
 569static inline void page_unlock_tb(const TranslationBlock *tb)
 570{ }
 571
 572struct page_collection *
 573page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
 574{
 575    return NULL;
 576}
 577
 578void page_collection_unlock(struct page_collection *set)
 579{ }
 580#else /* !CONFIG_USER_ONLY */
 581
 582#ifdef CONFIG_DEBUG_TCG
 583
 584static __thread GHashTable *ht_pages_locked_debug;
 585
 586static void ht_pages_locked_debug_init(void)
 587{
 588    if (ht_pages_locked_debug) {
 589        return;
 590    }
 591    ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
 592}
 593
 594static bool page_is_locked(const PageDesc *pd)
 595{
 596    PageDesc *found;
 597
 598    ht_pages_locked_debug_init();
 599    found = g_hash_table_lookup(ht_pages_locked_debug, pd);
 600    return !!found;
 601}
 602
 603static void page_lock__debug(PageDesc *pd)
 604{
 605    ht_pages_locked_debug_init();
 606    g_assert(!page_is_locked(pd));
 607    g_hash_table_insert(ht_pages_locked_debug, pd, pd);
 608}
 609
 610static void page_unlock__debug(const PageDesc *pd)
 611{
 612    bool removed;
 613
 614    ht_pages_locked_debug_init();
 615    g_assert(page_is_locked(pd));
 616    removed = g_hash_table_remove(ht_pages_locked_debug, pd);
 617    g_assert(removed);
 618}
 619
 620static void
 621do_assert_page_locked(const PageDesc *pd, const char *file, int line)
 622{
 623    if (unlikely(!page_is_locked(pd))) {
 624        error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
 625                     pd, file, line);
 626        abort();
 627    }
 628}
 629
 630#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
 631
 632void assert_no_pages_locked(void)
 633{
 634    ht_pages_locked_debug_init();
 635    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
 636}
 637
 638#else /* !CONFIG_DEBUG_TCG */
 639
 640#define assert_page_locked(pd)
 641
 642static inline void page_lock__debug(const PageDesc *pd)
 643{
 644}
 645
 646static inline void page_unlock__debug(const PageDesc *pd)
 647{
 648}
 649
 650#endif /* CONFIG_DEBUG_TCG */
 651
 652static inline void page_lock(PageDesc *pd)
 653{
 654    page_lock__debug(pd);
 655    qemu_spin_lock(&pd->lock);
 656}
 657
 658static inline void page_unlock(PageDesc *pd)
 659{
 660    qemu_spin_unlock(&pd->lock);
 661    page_unlock__debug(pd);
 662}
 663
 664/* lock the page(s) of a TB in the correct acquisition order */
 665static inline void page_lock_tb(const TranslationBlock *tb)
 666{
 667    page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
 668}
 669
 670static inline void page_unlock_tb(const TranslationBlock *tb)
 671{
 672    PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
 673
 674    page_unlock(p1);
 675    if (unlikely(tb->page_addr[1] != -1)) {
 676        PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
 677
 678        if (p2 != p1) {
 679            page_unlock(p2);
 680        }
 681    }
 682}
 683
 684static inline struct page_entry *
 685page_entry_new(PageDesc *pd, tb_page_addr_t index)
 686{
 687    struct page_entry *pe = g_malloc(sizeof(*pe));
 688
 689    pe->index = index;
 690    pe->pd = pd;
 691    pe->locked = false;
 692    return pe;
 693}
 694
 695static void page_entry_destroy(gpointer p)
 696{
 697    struct page_entry *pe = p;
 698
 699    g_assert(pe->locked);
 700    page_unlock(pe->pd);
 701    g_free(pe);
 702}
 703
 704/* returns false on success */
 705static bool page_entry_trylock(struct page_entry *pe)
 706{
 707    bool busy;
 708
 709    busy = qemu_spin_trylock(&pe->pd->lock);
 710    if (!busy) {
 711        g_assert(!pe->locked);
 712        pe->locked = true;
 713        page_lock__debug(pe->pd);
 714    }
 715    return busy;
 716}
 717
 718static void do_page_entry_lock(struct page_entry *pe)
 719{
 720    page_lock(pe->pd);
 721    g_assert(!pe->locked);
 722    pe->locked = true;
 723}
 724
 725static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
 726{
 727    struct page_entry *pe = value;
 728
 729    do_page_entry_lock(pe);
 730    return FALSE;
 731}
 732
 733static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
 734{
 735    struct page_entry *pe = value;
 736
 737    if (pe->locked) {
 738        pe->locked = false;
 739        page_unlock(pe->pd);
 740    }
 741    return FALSE;
 742}
 743
 744/*
 745 * Trylock a page, and if successful, add the page to a collection.
 746 * Returns true ("busy") if the page could not be locked; false otherwise.
 747 */
 748static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
 749{
 750    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
 751    struct page_entry *pe;
 752    PageDesc *pd;
 753
 754    pe = g_tree_lookup(set->tree, &index);
 755    if (pe) {
 756        return false;
 757    }
 758
 759    pd = page_find(index);
 760    if (pd == NULL) {
 761        return false;
 762    }
 763
 764    pe = page_entry_new(pd, index);
 765    g_tree_insert(set->tree, &pe->index, pe);
 766
 767    /*
 768     * If this is either (1) the first insertion or (2) a page whose index
 769     * is higher than any other so far, just lock the page and move on.
 770     */
 771    if (set->max == NULL || pe->index > set->max->index) {
 772        set->max = pe;
 773        do_page_entry_lock(pe);
 774        return false;
 775    }
 776    /*
 777     * Try to acquire out-of-order lock; if busy, return busy so that we acquire
 778     * locks in order.
 779     */
 780    return page_entry_trylock(pe);
 781}
 782
 783static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
 784{
 785    tb_page_addr_t a = *(const tb_page_addr_t *)ap;
 786    tb_page_addr_t b = *(const tb_page_addr_t *)bp;
 787
 788    if (a == b) {
 789        return 0;
 790    } else if (a < b) {
 791        return -1;
 792    }
 793    return 1;
 794}
 795
 796/*
 797 * Lock a range of pages ([@start,@end[) as well as the pages of all
 798 * intersecting TBs.
 799 * Locking order: acquire locks in ascending order of page index.
 800 */
 801struct page_collection *
 802page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
 803{
 804    struct page_collection *set = g_malloc(sizeof(*set));
 805    tb_page_addr_t index;
 806    PageDesc *pd;
 807
 808    start >>= TARGET_PAGE_BITS;
 809    end   >>= TARGET_PAGE_BITS;
 810    g_assert(start <= end);
 811
 812    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
 813                                page_entry_destroy);
 814    set->max = NULL;
 815    assert_no_pages_locked();
 816
 817 retry:
 818    g_tree_foreach(set->tree, page_entry_lock, NULL);
 819
 820    for (index = start; index <= end; index++) {
 821        TranslationBlock *tb;
 822        int n;
 823
 824        pd = page_find(index);
 825        if (pd == NULL) {
 826            continue;
 827        }
 828        if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
 829            g_tree_foreach(set->tree, page_entry_unlock, NULL);
 830            goto retry;
 831        }
 832        assert_page_locked(pd);
 833        PAGE_FOR_EACH_TB(pd, tb, n) {
 834            if (page_trylock_add(set, tb->page_addr[0]) ||
 835                (tb->page_addr[1] != -1 &&
 836                 page_trylock_add(set, tb->page_addr[1]))) {
 837                /* drop all locks, and reacquire in order */
 838                g_tree_foreach(set->tree, page_entry_unlock, NULL);
 839                goto retry;
 840            }
 841        }
 842    }
 843    return set;
 844}
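
     /*
      * Note that whenever a trylock fails we drop every lock held so far and
      * retry; the retry reacquires the already-collected pages in ascending
      * index order (g_tree_foreach visits keys in sorted order), so the
      * scheme cannot deadlock, at the cost of occasional retries under
      * contention.
      */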
 845
 846void page_collection_unlock(struct page_collection *set)
 847{
 848    /* entries are unlocked and freed via page_entry_destroy */
 849    g_tree_destroy(set->tree);
 850    g_free(set);
 851}
 852
 853#endif /* !CONFIG_USER_ONLY */
 854
 855static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
 856                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
 857{
 858    PageDesc *p1, *p2;
 859    tb_page_addr_t page1;
 860    tb_page_addr_t page2;
 861
 862    assert_memory_lock();
 863    g_assert(phys1 != -1);
 864
 865    page1 = phys1 >> TARGET_PAGE_BITS;
 866    page2 = phys2 >> TARGET_PAGE_BITS;
 867
 868    p1 = page_find_alloc(page1, alloc);
 869    if (ret_p1) {
 870        *ret_p1 = p1;
 871    }
 872    if (likely(phys2 == -1)) {
 873        page_lock(p1);
 874        return;
 875    } else if (page1 == page2) {
 876        page_lock(p1);
 877        if (ret_p2) {
 878            *ret_p2 = p1;
 879        }
 880        return;
 881    }
 882    p2 = page_find_alloc(page2, alloc);
 883    if (ret_p2) {
 884        *ret_p2 = p2;
 885    }
 886    if (page1 < page2) {
 887        page_lock(p1);
 888        page_lock(p2);
 889    } else {
 890        page_lock(p2);
 891        page_lock(p1);
 892    }
 893}
 894
  895/* Minimum size of the code gen buffer.  This number is arbitrarily chosen,
  896   but not so small that we can't have a fair number of TBs live.  */
 897#define MIN_CODE_GEN_BUFFER_SIZE     (1 * MiB)
 898
 899/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
 900   indicated, this is constrained by the range of direct branches on the
 901   host cpu, as used by the TCG implementation of goto_tb.  */
 902#if defined(__x86_64__)
 903# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
 904#elif defined(__sparc__)
 905# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
 906#elif defined(__powerpc64__)
 907# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
 908#elif defined(__powerpc__)
 909# define MAX_CODE_GEN_BUFFER_SIZE  (32 * MiB)
 910#elif defined(__aarch64__)
 911# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
 912#elif defined(__s390x__)
 913  /* We have a +- 4GB range on the branches; leave some slop.  */
 914# define MAX_CODE_GEN_BUFFER_SIZE  (3 * GiB)
 915#elif defined(__mips__)
 916  /* We have a 256MB branch region, but leave room to make sure the
 917     main executable is also within that region.  */
 918# define MAX_CODE_GEN_BUFFER_SIZE  (128 * MiB)
 919#else
 920# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
 921#endif
 922
 923#if TCG_TARGET_REG_BITS == 32
 924#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
 925#ifdef CONFIG_USER_ONLY
 926/*
  927 * For user mode on smaller 32-bit systems we may run into trouble
 928 * allocating big chunks of data in the right place. On these systems
 929 * we utilise a static code generation buffer directly in the binary.
 930 */
 931#define USE_STATIC_CODE_GEN_BUFFER
 932#endif
 933#else /* TCG_TARGET_REG_BITS == 64 */
 934#ifdef CONFIG_USER_ONLY
 935/*
 936 * As user-mode emulation typically means running multiple instances
  937 * of the translator, don't go too nuts with our default code gen
 938 * buffer lest we make things too hard for the OS.
 939 */
 940#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
 941#else
 942/*
 943 * We expect most system emulation to run one or two guests per host.
 944 * Users running large scale system emulation may want to tweak their
 945 * runtime setup via the tb-size control on the command line.
 946 */
 947#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
 948#endif
 949#endif
 950
 951#define DEFAULT_CODE_GEN_BUFFER_SIZE \
 952  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
 953   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
 954
 955static inline size_t size_code_gen_buffer(size_t tb_size)
 956{
 957    /* Size the buffer.  */
 958    if (tb_size == 0) {
 959        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
 960    }
 961    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
 962        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
 963    }
 964    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
 965        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
 966    }
 967    return tb_size;
 968}
 969
 970#ifdef __mips__
 971/* In order to use J and JAL within the code_gen_buffer, we require
 972   that the buffer not cross a 256MB boundary.  */
 973static inline bool cross_256mb(void *addr, size_t size)
 974{
 975    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
 976}
 977
 978/* We weren't able to allocate a buffer without crossing that boundary,
 979   so make do with the larger portion of the buffer that doesn't cross.
 980   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
 981static inline void *split_cross_256mb(void *buf1, size_t size1)
 982{
 983    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
 984    size_t size2 = buf1 + size1 - buf2;
 985
 986    size1 = buf2 - buf1;
 987    if (size1 < size2) {
 988        size1 = size2;
 989        buf1 = buf2;
 990    }
 991
 992    tcg_ctx->code_gen_buffer_size = size1;
 993    return buf1;
 994}
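
     /*
      * E.g. (made-up addresses): a 32 MiB buffer starting at 0x0ff00000 ends
      * at 0x11f00000 and thus crosses 0x10000000; the split keeps the larger
      * piece, here the 31 MiB from 0x10000000 up, and shrinks
      * code_gen_buffer_size accordingly.
      */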
 995#endif
 996
 997#ifdef USE_STATIC_CODE_GEN_BUFFER
 998static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
 999    __attribute__((aligned(CODE_GEN_ALIGN)));
1000
1001static inline void *alloc_code_gen_buffer(void)
1002{
1003    void *buf = static_code_gen_buffer;
1004    void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
1005    size_t size;
1006
1007    /* page-align the beginning and end of the buffer */
1008    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
1009    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
1010
1011    size = end - buf;
1012
1013    /* Honor a command-line option limiting the size of the buffer.  */
1014    if (size > tcg_ctx->code_gen_buffer_size) {
1015        size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
1016                               qemu_real_host_page_size);
1017    }
1018    tcg_ctx->code_gen_buffer_size = size;
1019
1020#ifdef __mips__
1021    if (cross_256mb(buf, size)) {
1022        buf = split_cross_256mb(buf, size);
1023        size = tcg_ctx->code_gen_buffer_size;
1024    }
1025#endif
1026
1027    if (qemu_mprotect_rwx(buf, size)) {
1028        abort();
1029    }
1030    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1031
1032    return buf;
1033}
1034#elif defined(_WIN32)
1035static inline void *alloc_code_gen_buffer(void)
1036{
1037    size_t size = tcg_ctx->code_gen_buffer_size;
1038    return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
1039                        PAGE_EXECUTE_READWRITE);
1040}
1041#else
1042static inline void *alloc_code_gen_buffer(void)
1043{
1044    int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
1045    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
1046    size_t size = tcg_ctx->code_gen_buffer_size;
1047    void *buf;
1048
1049    buf = mmap(NULL, size, prot, flags, -1, 0);
1050    if (buf == MAP_FAILED) {
1051        return NULL;
1052    }
1053
1054#ifdef __mips__
1055    if (cross_256mb(buf, size)) {
1056        /*
1057         * Try again, with the original still mapped, to avoid re-acquiring
1058         * the same 256mb crossing.
1059         */
1060        size_t size2;
1061        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
1062        switch ((int)(buf2 != MAP_FAILED)) {
1063        case 1:
1064            if (!cross_256mb(buf2, size)) {
1065                /* Success!  Use the new buffer.  */
1066                munmap(buf, size);
1067                break;
1068            }
1069            /* Failure.  Work with what we had.  */
1070            munmap(buf2, size);
1071            /* fallthru */
1072        default:
1073            /* Split the original buffer.  Free the smaller half.  */
1074            buf2 = split_cross_256mb(buf, size);
1075            size2 = tcg_ctx->code_gen_buffer_size;
1076            if (buf == buf2) {
1077                munmap(buf + size2, size - size2);
1078            } else {
1079                munmap(buf, size - size2);
1080            }
1081            size = size2;
1082            break;
1083        }
1084        buf = buf2;
1085    }
1086#endif
1087
1088    /* Request large pages for the buffer.  */
1089    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1090
1091    return buf;
1092}
1093#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
1094
1095static inline void code_gen_alloc(size_t tb_size)
1096{
1097    tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
1098    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
1099    if (tcg_ctx->code_gen_buffer == NULL) {
1100        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
1101        exit(1);
1102    }
1103}
1104
1105static bool tb_cmp(const void *ap, const void *bp)
1106{
1107    const TranslationBlock *a = ap;
1108    const TranslationBlock *b = bp;
1109
1110    return a->pc == b->pc &&
1111        a->cs_base == b->cs_base &&
1112        a->flags == b->flags &&
1113        (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
1114        a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
1115        a->page_addr[0] == b->page_addr[0] &&
1116        a->page_addr[1] == b->page_addr[1];
1117}
1118
1119static void tb_htable_init(void)
1120{
1121    unsigned int mode = QHT_MODE_AUTO_RESIZE;
1122
1123    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
1124}
1125
1126/* Must be called before using the QEMU cpus. 'tb_size' is the size
1127   (in bytes) allocated to the translation buffer. Zero means default
1128   size. */
1129void tcg_exec_init(unsigned long tb_size)
1130{
1131    tcg_allowed = true;
1132    cpu_gen_init();
1133    page_init();
1134    tb_htable_init();
1135    code_gen_alloc(tb_size);
1136#if defined(CONFIG_SOFTMMU)
1137    /* There's no guest base to take into account, so go ahead and
1138       initialize the prologue now.  */
1139    tcg_prologue_init(tcg_ctx);
1140#endif
1141}
1142
1143/* call with @p->lock held */
1144static inline void invalidate_page_bitmap(PageDesc *p)
1145{
1146    assert_page_locked(p);
1147#ifdef CONFIG_SOFTMMU
1148    g_free(p->code_bitmap);
1149    p->code_bitmap = NULL;
1150    p->code_write_count = 0;
1151#endif
1152}
1153
1154/* Set to NULL all the 'first_tb' fields in all PageDescs. */
1155static void page_flush_tb_1(int level, void **lp)
1156{
1157    int i;
1158
1159    if (*lp == NULL) {
1160        return;
1161    }
1162    if (level == 0) {
1163        PageDesc *pd = *lp;
1164
1165        for (i = 0; i < V_L2_SIZE; ++i) {
1166            page_lock(&pd[i]);
1167            pd[i].first_tb = (uintptr_t)NULL;
1168            invalidate_page_bitmap(pd + i);
1169            page_unlock(&pd[i]);
1170        }
1171    } else {
1172        void **pp = *lp;
1173
1174        for (i = 0; i < V_L2_SIZE; ++i) {
1175            page_flush_tb_1(level - 1, pp + i);
1176        }
1177    }
1178}
1179
1180static void page_flush_tb(void)
1181{
1182    int i, l1_sz = v_l1_size;
1183
1184    for (i = 0; i < l1_sz; i++) {
1185        page_flush_tb_1(v_l2_levels, l1_map + i);
1186    }
1187}
1188
1189static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
1190{
1191    const TranslationBlock *tb = value;
1192    size_t *size = data;
1193
1194    *size += tb->tc.size;
1195    return false;
1196}
1197
1198/* flush all the translation blocks */
1199static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
1200{
1201    bool did_flush = false;
1202
1203    mmap_lock();
 1204    /* If it has already been done at the request of another CPU,
1205     * just retry.
1206     */
1207    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
1208        goto done;
1209    }
1210    did_flush = true;
1211
1212    if (DEBUG_TB_FLUSH_GATE) {
1213        size_t nb_tbs = tcg_nb_tbs();
1214        size_t host_size = 0;
1215
1216        tcg_tb_foreach(tb_host_size_iter, &host_size);
1217        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
1218               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
1219    }
1220
1221    CPU_FOREACH(cpu) {
1222        cpu_tb_jmp_cache_clear(cpu);
1223    }
1224
1225    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
1226    page_flush_tb();
1227
1228    tcg_region_reset_all();
1229    /* XXX: flush processor icache at this point if cache flush is
1230       expensive */
1231    atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
1232
1233done:
1234    mmap_unlock();
1235    if (did_flush) {
1236        qemu_plugin_flush_cb();
1237    }
1238}
1239
1240void tb_flush(CPUState *cpu)
1241{
1242    if (tcg_enabled()) {
1243        unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
1244
1245        if (cpu_in_exclusive_context(cpu)) {
1246            do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
1247        } else {
1248            async_safe_run_on_cpu(cpu, do_tb_flush,
1249                                  RUN_ON_CPU_HOST_INT(tb_flush_count));
1250        }
1251    }
1252}
1253
1254/*
1255 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1256 * so in order to prevent bit rot we compile them unconditionally in user-mode,
1257 * and let the optimizer get rid of them by wrapping their user-only callers
1258 * with if (DEBUG_TB_CHECK_GATE).
1259 */
1260#ifdef CONFIG_USER_ONLY
1261
1262static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
1263{
1264    TranslationBlock *tb = p;
1265    target_ulong addr = *(target_ulong *)userp;
1266
1267    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
1268        printf("ERROR invalidate: address=" TARGET_FMT_lx
1269               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
1270    }
1271}
1272
1273/* verify that all the pages have correct rights for code
1274 *
1275 * Called with mmap_lock held.
1276 */
1277static void tb_invalidate_check(target_ulong address)
1278{
1279    address &= TARGET_PAGE_MASK;
1280    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
1281}
1282
1283static void do_tb_page_check(void *p, uint32_t hash, void *userp)
1284{
1285    TranslationBlock *tb = p;
1286    int flags1, flags2;
1287
1288    flags1 = page_get_flags(tb->pc);
1289    flags2 = page_get_flags(tb->pc + tb->size - 1);
1290    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
1291        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1292               (long)tb->pc, tb->size, flags1, flags2);
1293    }
1294}
1295
1296/* verify that all the pages have correct rights for code */
1297static void tb_page_check(void)
1298{
1299    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
1300}
1301
1302#endif /* CONFIG_USER_ONLY */
1303
1304/*
1305 * user-mode: call with mmap_lock held
1306 * !user-mode: call with @pd->lock held
1307 */
1308static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
1309{
1310    TranslationBlock *tb1;
1311    uintptr_t *pprev;
1312    unsigned int n1;
1313
1314    assert_page_locked(pd);
1315    pprev = &pd->first_tb;
1316    PAGE_FOR_EACH_TB(pd, tb1, n1) {
1317        if (tb1 == tb) {
1318            *pprev = tb1->page_next[n1];
1319            return;
1320        }
1321        pprev = &tb1->page_next[n1];
1322    }
1323    g_assert_not_reached();
1324}
1325
1326/* remove @orig from its @n_orig-th jump list */
1327static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
1328{
1329    uintptr_t ptr, ptr_locked;
1330    TranslationBlock *dest;
1331    TranslationBlock *tb;
1332    uintptr_t *pprev;
1333    int n;
1334
1335    /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1336    ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1);
1337    dest = (TranslationBlock *)(ptr & ~1);
1338    if (dest == NULL) {
1339        return;
1340    }
1341
1342    qemu_spin_lock(&dest->jmp_lock);
1343    /*
1344     * While acquiring the lock, the jump might have been removed if the
1345     * destination TB was invalidated; check again.
1346     */
1347    ptr_locked = atomic_read(&orig->jmp_dest[n_orig]);
1348    if (ptr_locked != ptr) {
1349        qemu_spin_unlock(&dest->jmp_lock);
1350        /*
1351         * The only possibility is that the jump was unlinked via
 1352         * tb_jump_unlink(dest). Seeing another destination here would be a bug,
1353         * because we set the LSB above.
1354         */
1355        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
1356        return;
1357    }
1358    /*
1359     * We first acquired the lock, and since the destination pointer matches,
1360     * we know for sure that @orig is in the jmp list.
1361     */
1362    pprev = &dest->jmp_list_head;
1363    TB_FOR_EACH_JMP(dest, tb, n) {
1364        if (tb == orig && n == n_orig) {
1365            *pprev = tb->jmp_list_next[n];
1366            /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1367            qemu_spin_unlock(&dest->jmp_lock);
1368            return;
1369        }
1370        pprev = &tb->jmp_list_next[n];
1371    }
1372    g_assert_not_reached();
1373}
1374
1375/* reset the jump entry 'n' of a TB so that it is not chained to
1376   another TB */
1377static inline void tb_reset_jump(TranslationBlock *tb, int n)
1378{
1379    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1380    tb_set_jmp_target(tb, n, addr);
1381}
1382
1383/* remove any jumps to the TB */
1384static inline void tb_jmp_unlink(TranslationBlock *dest)
1385{
1386    TranslationBlock *tb;
1387    int n;
1388
1389    qemu_spin_lock(&dest->jmp_lock);
1390
1391    TB_FOR_EACH_JMP(dest, tb, n) {
1392        tb_reset_jump(tb, n);
1393        atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
1394        /* No need to clear the list entry; setting the dest ptr is enough */
1395    }
1396    dest->jmp_list_head = (uintptr_t)NULL;
1397
1398    qemu_spin_unlock(&dest->jmp_lock);
1399}
1400
1401/*
1402 * In user-mode, call with mmap_lock held.
1403 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1404 * locks held.
1405 */
1406static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
1407{
1408    CPUState *cpu;
1409    PageDesc *p;
1410    uint32_t h;
1411    tb_page_addr_t phys_pc;
1412
1413    assert_memory_lock();
1414
1415    /* make sure no further incoming jumps will be chained to this TB */
1416    qemu_spin_lock(&tb->jmp_lock);
1417    atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1418    qemu_spin_unlock(&tb->jmp_lock);
1419
1420    /* remove the TB from the hash list */
1421    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1422    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
1423                     tb->trace_vcpu_dstate);
1424    if (!(tb->cflags & CF_NOCACHE) &&
1425        !qht_remove(&tb_ctx.htable, tb, h)) {
1426        return;
1427    }
1428
1429    /* remove the TB from the page list */
1430    if (rm_from_page_list) {
1431        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1432        tb_page_remove(p, tb);
1433        invalidate_page_bitmap(p);
1434        if (tb->page_addr[1] != -1) {
1435            p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1436            tb_page_remove(p, tb);
1437            invalidate_page_bitmap(p);
1438        }
1439    }
1440
 1441    /* remove the TB from each CPU's tb_jmp_cache */
1442    h = tb_jmp_cache_hash_func(tb->pc);
1443    CPU_FOREACH(cpu) {
1444        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1445            atomic_set(&cpu->tb_jmp_cache[h], NULL);
1446        }
1447    }
1448
1449    /* suppress this TB from the two jump lists */
1450    tb_remove_from_jmp_list(tb, 0);
1451    tb_remove_from_jmp_list(tb, 1);
1452
1453    /* suppress any remaining jumps to this TB */
1454    tb_jmp_unlink(tb);
1455
1456    atomic_set(&tcg_ctx->tb_phys_invalidate_count,
1457               tcg_ctx->tb_phys_invalidate_count + 1);
1458}
1459
1460static void tb_phys_invalidate__locked(TranslationBlock *tb)
1461{
1462    do_tb_phys_invalidate(tb, true);
1463}
1464
1465/* invalidate one TB
1466 *
1467 * Called with mmap_lock held in user-mode.
1468 */
1469void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1470{
1471    if (page_addr == -1 && tb->page_addr[0] != -1) {
1472        page_lock_tb(tb);
1473        do_tb_phys_invalidate(tb, true);
1474        page_unlock_tb(tb);
1475    } else {
1476        do_tb_phys_invalidate(tb, false);
1477    }
1478}
1479
1480#ifdef CONFIG_SOFTMMU
1481/* call with @p->lock held */
1482static void build_page_bitmap(PageDesc *p)
1483{
1484    int n, tb_start, tb_end;
1485    TranslationBlock *tb;
1486
1487    assert_page_locked(p);
1488    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1489
1490    PAGE_FOR_EACH_TB(p, tb, n) {
1491        /* NOTE: this is subtle as a TB may span two physical pages */
1492        if (n == 0) {
1493            /* NOTE: tb_end may be after the end of the page, but
1494               it is not a problem */
1495            tb_start = tb->pc & ~TARGET_PAGE_MASK;
1496            tb_end = tb_start + tb->size;
1497            if (tb_end > TARGET_PAGE_SIZE) {
1498                tb_end = TARGET_PAGE_SIZE;
 1499            }
1500        } else {
1501            tb_start = 0;
1502            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1503        }
1504        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1505    }
1506}
1507#endif
1508
 1509/* add the tb to the target page and protect it if necessary
1510 *
1511 * Called with mmap_lock held for user-mode emulation.
1512 * Called with @p->lock held in !user-mode.
1513 */
1514static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
1515                               unsigned int n, tb_page_addr_t page_addr)
1516{
1517#ifndef CONFIG_USER_ONLY
1518    bool page_already_protected;
1519#endif
1520
1521    assert_page_locked(p);
1522
1523    tb->page_addr[n] = page_addr;
1524    tb->page_next[n] = p->first_tb;
1525#ifndef CONFIG_USER_ONLY
1526    page_already_protected = p->first_tb != (uintptr_t)NULL;
1527#endif
1528    p->first_tb = (uintptr_t)tb | n;
1529    invalidate_page_bitmap(p);
1530
1531#if defined(CONFIG_USER_ONLY)
1532    if (p->flags & PAGE_WRITE) {
1533        target_ulong addr;
1534        PageDesc *p2;
1535        int prot;
1536
 1537        /* force the host page as non-writable (writes will have a
1538           page fault + mprotect overhead) */
1539        page_addr &= qemu_host_page_mask;
1540        prot = 0;
1541        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1542            addr += TARGET_PAGE_SIZE) {
1543
1544            p2 = page_find(addr >> TARGET_PAGE_BITS);
1545            if (!p2) {
1546                continue;
1547            }
1548            prot |= p2->flags;
1549            p2->flags &= ~PAGE_WRITE;
 1550        }
1551        mprotect(g2h(page_addr), qemu_host_page_size,
1552                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1553        if (DEBUG_TB_INVALIDATE_GATE) {
1554            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1555        }
1556    }
1557#else
1558    /* if some code is already present, then the pages are already
1559       protected. So we handle the case where only the first TB is
1560       allocated in a physical page */
1561    if (!page_already_protected) {
1562        tlb_protect_code(page_addr);
1563    }
1564#endif
1565}
1566
1567/* add a new TB and link it to the physical page tables. phys_page2 is
1568 * (-1) to indicate that only one page contains the TB.
1569 *
1570 * Called with mmap_lock held for user-mode emulation.
1571 *
 1572 * Returns @tb, or a pointer to an existing TB that matches @tb.
1573 * Note that in !user-mode, another thread might have already added a TB
1574 * for the same block of guest code that @tb corresponds to. In that case,
1575 * the caller should discard the original @tb, and use instead the returned TB.
1576 */
1577static TranslationBlock *
1578tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1579             tb_page_addr_t phys_page2)
1580{
1581    PageDesc *p;
1582    PageDesc *p2 = NULL;
1583
1584    assert_memory_lock();
1585
1586    if (phys_pc == -1) {
1587        /*
1588         * If the TB is not associated with a physical RAM page then
1589         * it must be a temporary one-insn TB, and we have nothing to do
1590         * except fill in the page_addr[] fields.
1591         */
1592        assert(tb->cflags & CF_NOCACHE);
1593        tb->page_addr[0] = tb->page_addr[1] = -1;
1594        return tb;
1595    }
1596
1597    /*
 1598     * Add the TB to the page list, acquiring the pages' locks first.
1599     * We keep the locks held until after inserting the TB in the hash table,
1600     * so that if the insertion fails we know for sure that the TBs are still
1601     * in the page descriptors.
1602     * Note that inserting into the hash table first isn't an option, since
1603     * we can only insert TBs that are fully initialized.
1604     */
1605    page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
1606    tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
1607    if (p2) {
1608        tb_page_add(p2, tb, 1, phys_page2);
1609    } else {
1610        tb->page_addr[1] = -1;
1611    }
1612
1613    if (!(tb->cflags & CF_NOCACHE)) {
1614        void *existing_tb = NULL;
1615        uint32_t h;
1616
1617        /* add in the hash table */
1618        h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1619                         tb->trace_vcpu_dstate);
1620        qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
1621
1622        /* remove TB from the page(s) if we couldn't insert it */
1623        if (unlikely(existing_tb)) {
1624            tb_page_remove(p, tb);
1625            invalidate_page_bitmap(p);
1626            if (p2) {
1627                tb_page_remove(p2, tb);
1628                invalidate_page_bitmap(p2);
1629            }
1630            tb = existing_tb;
1631        }
1632    }
1633
1634    if (p2 && p2 != p) {
1635        page_unlock(p2);
1636    }
1637    page_unlock(p);
1638
1639#ifdef CONFIG_USER_ONLY
1640    if (DEBUG_TB_CHECK_GATE) {
1641        tb_page_check();
1642    }
1643#endif
1644    return tb;
1645}
1646
1647/* Called with mmap_lock held for user mode emulation.  */
1648TranslationBlock *tb_gen_code(CPUState *cpu,
1649                              target_ulong pc, target_ulong cs_base,
1650                              uint32_t flags, int cflags)
1651{
1652    CPUArchState *env = cpu->env_ptr;
1653    TranslationBlock *tb, *existing_tb;
1654    tb_page_addr_t phys_pc, phys_page2;
1655    target_ulong virt_page2;
1656    tcg_insn_unit *gen_code_buf;
1657    int gen_code_size, search_size, max_insns;
1658#ifdef CONFIG_PROFILER
1659    TCGProfile *prof = &tcg_ctx->prof;
1660    int64_t ti;
1661#endif
1662
1663    assert_memory_lock();
1664
1665    phys_pc = get_page_addr_code(env, pc);
1666
1667    if (phys_pc == -1) {
1668        /* Generate a temporary TB with 1 insn in it */
1669        cflags &= ~CF_COUNT_MASK;
1670        cflags |= CF_NOCACHE | 1;
1671    }
1672
1673    cflags &= ~CF_CLUSTER_MASK;
1674    cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
1675
1676    max_insns = cflags & CF_COUNT_MASK;
1677    if (max_insns == 0) {
1678        max_insns = CF_COUNT_MASK;
1679    }
1680    if (max_insns > TCG_MAX_INSNS) {
1681        max_insns = TCG_MAX_INSNS;
1682    }
1683    if (cpu->singlestep_enabled || singlestep) {
1684        max_insns = 1;
1685    }
1686
1687 buffer_overflow:
1688    tb = tcg_tb_alloc(tcg_ctx);
1689    if (unlikely(!tb)) {
1690        /* flush must be done */
1691        tb_flush(cpu);
1692        mmap_unlock();
1693        /* Make the execution loop process the flush as soon as possible.  */
1694        cpu->exception_index = EXCP_INTERRUPT;
1695        cpu_loop_exit(cpu);
1696    }
1697
1698    gen_code_buf = tcg_ctx->code_gen_ptr;
1699    tb->tc.ptr = gen_code_buf;
1700    tb->pc = pc;
1701    tb->cs_base = cs_base;
1702    tb->flags = flags;
1703    tb->cflags = cflags;
1704    tb->orig_tb = NULL;
1705    tb->trace_vcpu_dstate = *cpu->trace_dstate;
1706    tcg_ctx->tb_cflags = cflags;
1707 tb_overflow:
1708
1709#ifdef CONFIG_PROFILER
1710    /* includes aborted translations because of exceptions */
1711    atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
1712    ti = profile_getclock();
1713#endif
1714
1715    tcg_func_start(tcg_ctx);
1716
1717    tcg_ctx->cpu = env_cpu(env);
1718    gen_intermediate_code(cpu, tb, max_insns);
1719    tcg_ctx->cpu = NULL;
1720
1721    trace_translate_block(tb, tb->pc, tb->tc.ptr);
1722
1723    /* generate machine code */
1724    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1725    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1726    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
1727    if (TCG_TARGET_HAS_direct_jump) {
1728        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1729        tcg_ctx->tb_jmp_target_addr = NULL;
1730    } else {
1731        tcg_ctx->tb_jmp_insn_offset = NULL;
1732        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
1733    }
1734
1735#ifdef CONFIG_PROFILER
1736    atomic_set(&prof->tb_count, prof->tb_count + 1);
1737    atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
1738    ti = profile_getclock();
1739#endif
1740
1741    gen_code_size = tcg_gen_code(tcg_ctx, tb);
1742    if (unlikely(gen_code_size < 0)) {
1743        switch (gen_code_size) {
1744        case -1:
1745            /*
1746             * Overflow of code_gen_buffer, or the current slice of it.
1747             *
1748             * TODO: We don't need to re-do gen_intermediate_code, nor
1749             * should we re-do the tcg optimization currently hidden
1750             * inside tcg_gen_code.  All that should be required is to
1751             * flush the TBs, allocate a new TB, re-initialize it per
1752             * above, and re-do the actual code generation.
1753             */
1754            goto buffer_overflow;
1755
1756        case -2:
1757            /*
1758             * The code generated for the TranslationBlock is too large.
1759             * The maximum size allowed by the unwind info is 64k.
1760             * There may be stricter constraints from relocations
1761             * in the tcg backend.
1762             *
1763             * Try again with half as many insns as we attempted this time.
1764             * If a single insn overflows, there's a bug somewhere...
1765             */
1766            max_insns = tb->icount;
1767            assert(max_insns > 1);
1768            max_insns /= 2;
1769            goto tb_overflow;
1770
1771        default:
1772            g_assert_not_reached();
1773        }
1774    }
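        /*
         * encode_search() appends the "search data" right after the
         * generated code: a compact map from host code offsets back to
         * guest insn boundaries, decoded by cpu_restore_state_from_tb()
         * when restoring state on a fault.
         */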
1775    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1776    if (unlikely(search_size < 0)) {
1777        goto buffer_overflow;
1778    }
1779    tb->tc.size = gen_code_size;
1780
1781#ifdef CONFIG_PROFILER
1782    atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
1783    atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
1784    atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
1785    atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
1786#endif
1787
1788#ifdef DEBUG_DISAS
1789    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1790        qemu_log_in_addr_range(tb->pc)) {
1791        FILE *logfile = qemu_log_lock();
1792        qemu_log("OUT: [size=%d]\n", gen_code_size);
1793        if (tcg_ctx->data_gen_ptr) {
1794            size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
1795            size_t data_size = gen_code_size - code_size;
1796            size_t i;
1797
1798            log_disas(tb->tc.ptr, code_size);
1799
1800            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
1801                if (sizeof(tcg_target_ulong) == 8) {
1802                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
1803                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
1804                             *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
1805                } else {
1806                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
1807                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
1808                             *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
1809                }
1810            }
1811        } else {
1812            log_disas(tb->tc.ptr, gen_code_size);
1813        }
1814        qemu_log("\n");
1815        qemu_log_flush();
1816        qemu_log_unlock(logfile);
1817    }
1818#endif
1819
1820    atomic_set(&tcg_ctx->code_gen_ptr, (void *)
1821        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1822                 CODE_GEN_ALIGN));
1823
1824    /* init jump list */
1825    qemu_spin_init(&tb->jmp_lock);
1826    tb->jmp_list_head = (uintptr_t)NULL;
1827    tb->jmp_list_next[0] = (uintptr_t)NULL;
1828    tb->jmp_list_next[1] = (uintptr_t)NULL;
1829    tb->jmp_dest[0] = (uintptr_t)NULL;
1830    tb->jmp_dest[1] = (uintptr_t)NULL;
1831
1832    /* init original jump addresses which have been set during tcg_gen_code() */
1833    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1834        tb_reset_jump(tb, 0);
1835    }
1836    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1837        tb_reset_jump(tb, 1);
1838    }
1839
1840    /* check next page if needed */
1841    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1842    phys_page2 = -1;
1843    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1844        phys_page2 = get_page_addr_code(env, virt_page2);
1845    }
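        /*
         * If the TB straddles a page boundary, tb_link_page() below links
         * it into both physical pages, so a write to either page
         * invalidates it.
         */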
1846    /*
1847     * No explicit memory barrier is required -- tb_link_page() makes the
1848     * TB visible in a consistent state.
1849     */
1850    existing_tb = tb_link_page(tb, phys_pc, phys_page2);
1851    /* if the TB already exists, discard what we just translated */
1852    if (unlikely(existing_tb != tb)) {
1853        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
1854
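            /*
             * tcg_tb_alloc() placed the TranslationBlock header, rounded
             * up to the icache line size, immediately before the code
             * buffer; rewinding code_gen_ptr by that amount gives back
             * both the header and the code we just generated.
             */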
1855        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
1856        atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
1857        return existing_tb;
1858    }
1859    tcg_tb_insert(tb);
1860    return tb;
1861}
1862
1863/*
1864 * @p must be non-NULL.
1865 * user-mode: call with mmap_lock held.
1866 * !user-mode: call with all @pages locked.
1867 */
1868static void
1869tb_invalidate_phys_page_range__locked(struct page_collection *pages,
1870                                      PageDesc *p, tb_page_addr_t start,
1871                                      tb_page_addr_t end,
1872                                      uintptr_t retaddr)
1873{
1874    TranslationBlock *tb;
1875    tb_page_addr_t tb_start, tb_end;
1876    int n;
1877#ifdef TARGET_HAS_PRECISE_SMC
1878    CPUState *cpu = current_cpu;
1879    CPUArchState *env = NULL;
1880    bool current_tb_not_found = retaddr != 0;
1881    bool current_tb_modified = false;
1882    TranslationBlock *current_tb = NULL;
1883    target_ulong current_pc = 0;
1884    target_ulong current_cs_base = 0;
1885    uint32_t current_flags = 0;
1886#endif /* TARGET_HAS_PRECISE_SMC */
1887
1888    assert_page_locked(p);
1889
1890#if defined(TARGET_HAS_PRECISE_SMC)
1891    if (cpu != NULL) {
1892        env = cpu->env_ptr;
1893    }
1894#endif
1895
1896    /* we remove all the TBs in the range [start, end[ */
1897    /* XXX: see if in some cases it could be faster to invalidate all
1898       the code */
1899    PAGE_FOR_EACH_TB(p, tb, n) {
1900        assert_page_locked(p);
1901        /* NOTE: this is subtle as a TB may span two physical pages */
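            /* n selects the page slot: 0 if @p is the TB's first page,
               1 if it is the second. */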
1902        if (n == 0) {
1903            /* NOTE: tb_end may be after the end of the page, but
1904               it is not a problem */
1905            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1906            tb_end = tb_start + tb->size;
1907        } else {
1908            tb_start = tb->page_addr[1];
1909            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1910        }
1911        if (!(tb_end <= start || tb_start >= end)) {
1912#ifdef TARGET_HAS_PRECISE_SMC
1913            if (current_tb_not_found) {
1914                current_tb_not_found = false;
1915                /* now we have a real cpu fault */
1916                current_tb = tcg_tb_lookup(retaddr);
1917            }
1918            if (current_tb == tb &&
1919                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
1920                /*
1921                 * If we are modifying the current TB, we must stop
1922                 * its execution. We could be more precise by checking
1923                 * that the modification is after the current PC, but it
1924                 * would require a specialized function to partially
1925                 * restore the CPU state.
1926                 */
1927                current_tb_modified = true;
1928                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
1929                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1930                                     &current_flags);
1931            }
1932#endif /* TARGET_HAS_PRECISE_SMC */
1933            tb_phys_invalidate__locked(tb);
1934        }
1935    }
1936#if !defined(CONFIG_USER_ONLY)
1937    /* if no code remaining, no need to continue to use slow writes */
1938    if (!p->first_tb) {
1939        invalidate_page_bitmap(p);
1940        tlb_unprotect_code(start);
1941    }
1942#endif
1943#ifdef TARGET_HAS_PRECISE_SMC
1944    if (current_tb_modified) {
1945        page_collection_unlock(pages);
1946        /* Force execution of one insn next time.  */
1947        cpu->cflags_next_tb = 1 | curr_cflags();
1948        mmap_unlock();
1949        cpu_loop_exit_noexc(cpu);
1950    }
1951#endif
1952}
1953
1954/*
1955 * Invalidate all TBs which intersect with the target physical address range
1956 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1957 * This is not called from a CPU write access: a zero retaddr is passed
1958 * to tb_invalidate_phys_page_range__locked(), so the virtual CPU is never
1959 * forced to exit the TB it is currently executing.
1960 *
1961 * Called with mmap_lock held for user-mode emulation
1962 */
1963void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
1964{
1965    struct page_collection *pages;
1966    PageDesc *p;
1967
1968    assert_memory_lock();
1969
1970    p = page_find(start >> TARGET_PAGE_BITS);
1971    if (p == NULL) {
1972        return;
1973    }
1974    pages = page_collection_lock(start, end);
1975    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
1976    page_collection_unlock(pages);
1977}
1978
1979/*
1980 * Invalidate all TBs which intersect with the target physical address range
1981 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1982 * This is not called from a CPU write access: a zero retaddr is passed
1983 * to tb_invalidate_phys_page_range__locked(), so the virtual CPU is never
1984 * forced to exit the TB it is currently executing.
1985 *
1986 * Called with mmap_lock held for user-mode emulation.
1987 */
1988#ifdef CONFIG_SOFTMMU
1989void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
1990#else
1991void tb_invalidate_phys_range(target_ulong start, target_ulong end)
1992#endif
1993{
1994    struct page_collection *pages;
1995    tb_page_addr_t next;
1996
1997    assert_memory_lock();
1998
1999    pages = page_collection_lock(start, end);
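        /*
         * The range may span multiple pages; walk it one target page at a
         * time, since tb_invalidate_phys_page_range__locked() operates on
         * a single PageDesc.
         */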
2000    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2001         start < end;
2002         start = next, next += TARGET_PAGE_SIZE) {
2003        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
2004        tb_page_addr_t bound = MIN(next, end);
2005
2006        if (pd == NULL) {
2007            continue;
2008        }
2009        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
2010    }
2011    page_collection_unlock(pages);
2012}
2013
2014#ifdef CONFIG_SOFTMMU
2015/* len must be <= 8 and start must be a multiple of len.
2016 * Called via softmmu_template.h when code areas are written to with
2017 * iothread mutex not held.
2018 *
2019 * Call with all @pages in the range [@start, @start + len[ locked.
2020 */
2021void tb_invalidate_phys_page_fast(struct page_collection *pages,
2022                                  tb_page_addr_t start, int len,
2023                                  uintptr_t retaddr)
2024{
2025    PageDesc *p;
2026
2027    assert_memory_lock();
2028
2029    p = page_find(start >> TARGET_PAGE_BITS);
2030    if (!p) {
2031        return;
2032    }
2033
2034    assert_page_locked(p);
2035    if (!p->code_bitmap &&
2036        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
2037        build_page_bitmap(p);
2038    }
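        /*
         * code_bitmap has one bit per byte of the page, set for bytes
         * covered by translated code.  If none of the @len bytes being
         * written overlaps that set (e.g. a 4-byte store tests bits
         * [nr, nr + 3]), the invalidation can be skipped entirely.
         */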
2039    if (p->code_bitmap) {
2040        unsigned int nr;
2041        unsigned long b;
2042
2043        nr = start & ~TARGET_PAGE_MASK;
2044        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
2045        if (b & ((1 << len) - 1)) {
2046            goto do_invalidate;
2047        }
2048    } else {
2049    do_invalidate:
2050        tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
2051                                              retaddr);
2052    }
2053}
2054#else
2055/* Called with mmap_lock held. If pc is not 0 then it indicates the
2056 * host PC of the faulting store instruction that caused this invalidate.
2057 * Returns true if the caller needs to abort execution of the current
2058 * TB (because it was modified by this store and the guest CPU has
2059 * precise-SMC semantics).
2060 */
2061static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
2062{
2063    TranslationBlock *tb;
2064    PageDesc *p;
2065    int n;
2066#ifdef TARGET_HAS_PRECISE_SMC
2067    TranslationBlock *current_tb = NULL;
2068    CPUState *cpu = current_cpu;
2069    CPUArchState *env = NULL;
2070    int current_tb_modified = 0;
2071    target_ulong current_pc = 0;
2072    target_ulong current_cs_base = 0;
2073    uint32_t current_flags = 0;
2074#endif
2075
2076    assert_memory_lock();
2077
2078    addr &= TARGET_PAGE_MASK;
2079    p = page_find(addr >> TARGET_PAGE_BITS);
2080    if (!p) {
2081        return false;
2082    }
2083
2084#ifdef TARGET_HAS_PRECISE_SMC
2085    if (p->first_tb && pc != 0) {
2086        current_tb = tcg_tb_lookup(pc);
2087    }
2088    if (cpu != NULL) {
2089        env = cpu->env_ptr;
2090    }
2091#endif
2092    assert_page_locked(p);
2093    PAGE_FOR_EACH_TB(p, tb, n) {
2094#ifdef TARGET_HAS_PRECISE_SMC
2095        if (current_tb == tb &&
2096            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
2097            /*
2098             * If we are modifying the current TB, we must stop its
2099             * execution. We could be more precise by checking that the
2100             * modification is after the current PC, but it would require
2101             * a specialized function to partially restore the CPU state.
2102             */
2103            current_tb_modified = 1;
2104            cpu_restore_state_from_tb(cpu, current_tb, pc, true);
2105            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2106                                 &current_flags);
2107        }
2108#endif /* TARGET_HAS_PRECISE_SMC */
2109        tb_phys_invalidate(tb, addr);
2110    }
2111    p->first_tb = (uintptr_t)NULL;
2112#ifdef TARGET_HAS_PRECISE_SMC
2113    if (current_tb_modified) {
2114        /* Force execution of one insn next time.  */
2115        cpu->cflags_next_tb = 1 | curr_cflags();
2116        return true;
2117    }
2118#endif
2119
2120    return false;
2121}
2122#endif
2123
2124/* user-mode: call with mmap_lock held */
2125void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
2126{
2127    TranslationBlock *tb;
2128
2129    assert_memory_lock();
2130
2131    tb = tcg_tb_lookup(retaddr);
2132    if (tb) {
2133        /* We can use retranslation to find the PC.  */
2134        cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2135        tb_phys_invalidate(tb, -1);
2136    } else {
2137        /* The exception probably happened in a helper.  The CPU state should
2138           have been saved before calling it. Fetch the PC from there.  */
2139        CPUArchState *env = cpu->env_ptr;
2140        target_ulong pc, cs_base;
2141        tb_page_addr_t addr;
2142        uint32_t flags;
2143
2144        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
2145        addr = get_page_addr_code(env, pc);
2146        if (addr != -1) {
2147            tb_invalidate_phys_range(addr, addr + 1);
2148        }
2149    }
2150}
2151
2152#ifndef CONFIG_USER_ONLY
2153/* in deterministic execution mode, instructions doing device I/Os
2154 * must be at the end of the TB.
2155 *
2156 * Called by softmmu_template.h, with iothread mutex not held.
2157 */
2158void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
2159{
2160#if defined(TARGET_MIPS) || defined(TARGET_SH4)
2161    CPUArchState *env = cpu->env_ptr;
2162#endif
2163    TranslationBlock *tb;
2164    uint32_t n;
2165
2166    tb = tcg_tb_lookup(retaddr);
2167    if (!tb) {
2168        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
2169                  (void *)retaddr);
2170    }
2171    cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2172
2173    /* On MIPS and SH, delay slot instructions can only be restarted if
2174       they were already the first instruction in the TB.  If this is not
2175       the first instruction in a TB then re-execute the preceding
2176       branch.  */
2177    n = 1;
2178#if defined(TARGET_MIPS)
2179    if ((env->hflags & MIPS_HFLAG_BMASK) != 0
2180        && env->active_tc.PC != tb->pc) {
2181        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
2182        cpu_neg(cpu)->icount_decr.u16.low++;
2183        env->hflags &= ~MIPS_HFLAG_BMASK;
2184        n = 2;
2185    }
2186#elif defined(TARGET_SH4)
2187    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
2188        && env->pc != tb->pc) {
2189        env->pc -= 2;
2190        cpu_neg(cpu)->icount_decr.u16.low++;
2191        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
2192        n = 2;
2193    }
2194#endif
2195
2196    /* Generate a new TB executing the I/O insn.  */
2197    cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
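        /* n is 1 or 2 insns; CF_LAST_IO marks the last of them as allowed
           to perform I/O. */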
2198
2199    if (tb_cflags(tb) & CF_NOCACHE) {
2200        if (tb->orig_tb) {
2201            /* Invalidate original TB if this TB was generated in
2202             * cpu_exec_nocache() */
2203            tb_phys_invalidate(tb->orig_tb, -1);
2204        }
2205        tcg_tb_remove(tb);
2206    }
2207
2208    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2209     * the first in the TB) then we end up generating a whole new TB and
2210     * repeating the fault, which is horribly inefficient.
2211     * Better would be to execute just this insn uncached, or generate a
2212     * second new TB.
2213     */
2214    cpu_loop_exit_noexc(cpu);
2215}
2216
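    /*
     * Entries for all PCs within one guest page occupy TB_JMP_PAGE_SIZE
     * consecutive slots in the jump cache, starting at
     * tb_jmp_cache_hash_page(page_addr), which is why a simple linear
     * sweep is enough to drop a whole page here.
     */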
2217static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
2218{
2219    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
2220
2221    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
2222        atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
2223    }
2224}
2225
2226void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
2227{
2228    /* Discard jump cache entries for any tb which might overlap the
2229       flushed page.  */
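        /* A TB that starts on the preceding page may extend into the
           flushed page, hence the extra sweep of the preceding page. */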
2230    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
2231    tb_jmp_cache_clear_page(cpu, addr);
2232}
2233
2234static void print_qht_statistics(struct qht_stats hst)
2235{
2236    uint32_t hgram_opts;
2237    size_t hgram_bins;
2238    char *hgram;
2239
2240    if (!hst.head_buckets) {
2241        return;
2242    }
2243    qemu_printf("TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
2244                hst.used_head_buckets, hst.head_buckets,
2245                (double)hst.used_head_buckets / hst.head_buckets * 100);
2246
2247    hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
2248    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
2249    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2250        hgram_opts |= QDIST_PR_NODECIMAL;
2251    }
2252    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
2253    qemu_printf("TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
2254                qdist_avg(&hst.occupancy) * 100, hgram);
2255    g_free(hgram);
2256
2257    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2258    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2259    if (hgram_bins > 10) {
2260        hgram_bins = 10;
2261    } else {
2262        hgram_bins = 0;
2263        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2264    }
2265    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
2266    qemu_printf("TB hash avg chain   %0.3f buckets. Histogram: %s\n",
2267                qdist_avg(&hst.chain), hgram);
2268    g_free(hgram);
2269}
2270
2271struct tb_tree_stats {
2272    size_t nb_tbs;
2273    size_t host_size;
2274    size_t target_size;
2275    size_t max_target_size;
2276    size_t direct_jmp_count;
2277    size_t direct_jmp2_count;
2278    size_t cross_page;
2279};
2280
2281static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2282{
2283    const TranslationBlock *tb = value;
2284    struct tb_tree_stats *tst = data;
2285
2286    tst->nb_tbs++;
2287    tst->host_size += tb->tc.size;
2288    tst->target_size += tb->size;
2289    if (tb->size > tst->max_target_size) {
2290        tst->max_target_size = tb->size;
2291    }
2292    if (tb->page_addr[1] != -1) {
2293        tst->cross_page++;
2294    }
2295    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2296        tst->direct_jmp_count++;
2297        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2298            tst->direct_jmp2_count++;
2299        }
2300    }
2301    return false;
2302}
2303
2304void dump_exec_info(void)
2305{
2306    struct tb_tree_stats tst = {};
2307    struct qht_stats hst;
2308    size_t nb_tbs, flush_full, flush_part, flush_elide;
2309
2310    tcg_tb_foreach(tb_tree_stats_iter, &tst);
2311    nb_tbs = tst.nb_tbs;
2312    /* XXX: avoid using doubles ? */
2313    qemu_printf("Translation buffer state:\n");
2314    /*
2315     * Report total code size including the padding and TB structs;
2316     * otherwise users might think "-tb-size" is not honoured.
2317     * For avg host size we use the precise numbers from tb_tree_stats though.
2318     */
2319    qemu_printf("gen code size       %zu/%zu\n",
2320                tcg_code_size(), tcg_code_capacity());
2321    qemu_printf("TB count            %zu\n", nb_tbs);
2322    qemu_printf("TB avg target size  %zu max=%zu bytes\n",
2323                nb_tbs ? tst.target_size / nb_tbs : 0,
2324                tst.max_target_size);
2325    qemu_printf("TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
2326                nb_tbs ? tst.host_size / nb_tbs : 0,
2327                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
2328    qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
2329                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
2330    qemu_printf("direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
2331                tst.direct_jmp_count,
2332                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
2333                tst.direct_jmp2_count,
2334                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
2335
2336    qht_statistics_init(&tb_ctx.htable, &hst);
2337    print_qht_statistics(hst);
2338    qht_statistics_destroy(&hst);
2339
2340    qemu_printf("\nStatistics:\n");
2341    qemu_printf("TB flush count      %u\n",
2342                atomic_read(&tb_ctx.tb_flush_count));
2343    qemu_printf("TB invalidate count %zu\n",
2344                tcg_tb_phys_invalidate_count());
2345
2346    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
2347    qemu_printf("TLB full flushes    %zu\n", flush_full);
2348    qemu_printf("TLB partial flushes %zu\n", flush_part);
2349    qemu_printf("TLB elided flushes  %zu\n", flush_elide);
2350    tcg_dump_info();
2351}
2352
2353void dump_opcount_info(void)
2354{
2355    tcg_dump_op_count();
2356}
2357
2358#else /* CONFIG_USER_ONLY */
2359
2360void cpu_interrupt(CPUState *cpu, int mask)
2361{
2362    g_assert(qemu_mutex_iothread_locked());
2363    cpu->interrupt_request |= mask;
2364    atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
2365}
2366
2367/*
2368 * Walks guest process memory "regions" one by one
2369 * and calls callback function 'fn' for each region.
2370 */
2371struct walk_memory_regions_data {
2372    walk_memory_regions_fn fn;
2373    void *priv;
2374    target_ulong start;
2375    int prot;
2376};
2377
2378static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2379                                   target_ulong end, int new_prot)
2380{
2381    if (data->start != -1u) {
2382        int rc = data->fn(data->priv, data->start, end, data->prot);
2383        if (rc != 0) {
2384            return rc;
2385        }
2386    }
2387
2388    data->start = (new_prot ? end : -1u);
2389    data->prot = new_prot;
2390
2391    return 0;
2392}
2393
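    /*
     * Recursively walk one subtree of the l1_map radix tree.  At level 0
     * the entries are PageDesc arrays: consecutive pages with identical
     * protection are merged into one region, and walk_memory_regions_end()
     * is called at each change (an empty subtree counts as prot 0).  At
     * higher levels we simply recurse into the child tables.
     */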
2394static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2395                                 target_ulong base, int level, void **lp)
2396{
2397    target_ulong pa;
2398    int i, rc;
2399
2400    if (*lp == NULL) {
2401        return walk_memory_regions_end(data, base, 0);
2402    }
2403
2404    if (level == 0) {
2405        PageDesc *pd = *lp;
2406
2407        for (i = 0; i < V_L2_SIZE; ++i) {
2408            int prot = pd[i].flags;
2409
2410            pa = base | (i << TARGET_PAGE_BITS);
2411            if (prot != data->prot) {
2412                rc = walk_memory_regions_end(data, pa, prot);
2413                if (rc != 0) {
2414                    return rc;
2415                }
2416            }
2417        }
2418    } else {
2419        void **pp = *lp;
2420
2421        for (i = 0; i < V_L2_SIZE; ++i) {
2422            pa = base | ((target_ulong)i <<
2423                (TARGET_PAGE_BITS + V_L2_BITS * level));
2424            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2425            if (rc != 0) {
2426                return rc;
2427            }
2428        }
2429    }
2430
2431    return 0;
2432}
2433
2434int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2435{
2436    struct walk_memory_regions_data data;
2437    uintptr_t i, l1_sz = v_l1_size;
2438
2439    data.fn = fn;
2440    data.priv = priv;
2441    data.start = -1u;
2442    data.prot = 0;
2443
2444    for (i = 0; i < l1_sz; i++) {
2445        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2446        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2447        if (rc != 0) {
2448            return rc;
2449        }
2450    }
2451
2452    return walk_memory_regions_end(&data, 0, 0);
2453}
2454
2455static int dump_region(void *priv, target_ulong start,
2456    target_ulong end, unsigned long prot)
2457{
2458    FILE *f = (FILE *)priv;
2459
2460    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2461        " "TARGET_FMT_lx" %c%c%c\n",
2462        start, end, end - start,
2463        ((prot & PAGE_READ) ? 'r' : '-'),
2464        ((prot & PAGE_WRITE) ? 'w' : '-'),
2465        ((prot & PAGE_EXEC) ? 'x' : '-'));
2466
2467    return 0;
2468}
2469
2470/* dump memory mappings */
2471void page_dump(FILE *f)
2472{
2473    const int length = sizeof(target_ulong) * 2;
2474    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2475            length, "start", length, "end", length, "size", "prot");
2476    walk_memory_regions(f, dump_region);
2477}
2478
2479int page_get_flags(target_ulong address)
2480{
2481    PageDesc *p;
2482
2483    p = page_find(address >> TARGET_PAGE_BITS);
2484    if (!p) {
2485        return 0;
2486    }
2487    return p->flags;
2488}
2489
2490/* Modify the flags of a page and invalidate the code if necessary.
2491   The flag PAGE_WRITE_ORG is set automatically depending
2492   on PAGE_WRITE.  The mmap_lock should already be held.  */
2493void page_set_flags(target_ulong start, target_ulong end, int flags)
2494{
2495    target_ulong addr, len;
2496
2497    /* This function should never be called with addresses outside the
2498       guest address space.  If this assert fires, it probably indicates
2499       a missing call to h2g_valid.  */
2500#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2501    assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2502#endif
2503    assert(start < end);
2504    assert_memory_lock();
2505
2506    start = start & TARGET_PAGE_MASK;
2507    end = TARGET_PAGE_ALIGN(end);
2508
2509    if (flags & PAGE_WRITE) {
2510        flags |= PAGE_WRITE_ORG;
2511    }
2512
2513    for (addr = start, len = end - start;
2514         len != 0;
2515         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2516        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2517
2518        /* If the write protection bit is set, then we invalidate
2519           the code inside.  */
2520        if (!(p->flags & PAGE_WRITE) &&
2521            (flags & PAGE_WRITE) &&
2522            p->first_tb) {
2523            tb_invalidate_phys_page(addr, 0);
2524        }
2525        p->flags = flags;
2526    }
2527}
2528
2529int page_check_range(target_ulong start, target_ulong len, int flags)
2530{
2531    PageDesc *p;
2532    target_ulong end;
2533    target_ulong addr;
2534
2535    /* This function should never be called with addresses outside the
2536       guest address space.  If this assert fires, it probably indicates
2537       a missing call to h2g_valid.  */
2538#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2539    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2540#endif
2541
2542    if (len == 0) {
2543        return 0;
2544    }
2545    if (start + len - 1 < start) {
2546        /* We've wrapped around.  */
2547        return -1;
2548    }
2549
2550    /* must do this before we lose bits in the next step */
2551    end = TARGET_PAGE_ALIGN(start + len);
2552    start = start & TARGET_PAGE_MASK;
2553
2554    for (addr = start, len = end - start;
2555         len != 0;
2556         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2557        p = page_find(addr >> TARGET_PAGE_BITS);
2558        if (!p) {
2559            return -1;
2560        }
2561        if (!(p->flags & PAGE_VALID)) {
2562            return -1;
2563        }
2564
2565        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2566            return -1;
2567        }
2568        if (flags & PAGE_WRITE) {
2569            if (!(p->flags & PAGE_WRITE_ORG)) {
2570                return -1;
2571            }
2572            /* unprotect the page if it was put read-only because it
2573               contains translated code */
2574            if (!(p->flags & PAGE_WRITE)) {
2575                if (!page_unprotect(addr, 0)) {
2576                    return -1;
2577                }
2578            }
2579        }
2580    }
2581    return 0;
2582}
2583
2584/* called from signal handler: invalidate the code and unprotect the
2585 * page. Return 0 if the fault was not handled, 1 if it was handled,
2586 * and 2 if it was handled but the caller must cause the TB to be
2587 * immediately exited. (We can only return 2 if the 'pc' argument is
2588 * non-zero.)
2589 */
2590int page_unprotect(target_ulong address, uintptr_t pc)
2591{
2592    unsigned int prot;
2593    bool current_tb_invalidated;
2594    PageDesc *p;
2595    target_ulong host_start, host_end, addr;
2596
2597    /* Technically this isn't safe inside a signal handler.  However we
2598       know this only ever happens in a synchronous SEGV handler, so in
2599       practice it seems to be ok.  */
2600    mmap_lock();
2601
2602    p = page_find(address >> TARGET_PAGE_BITS);
2603    if (!p) {
2604        mmap_unlock();
2605        return 0;
2606    }
2607
2608    /* if the page was really writable, then we change its
2609       protection back to writable */
2610    if (p->flags & PAGE_WRITE_ORG) {
2611        current_tb_invalidated = false;
2612        if (p->flags & PAGE_WRITE) {
2613            /* If the page is actually marked WRITE then assume this is because
2614             * this thread raced with another one which got here first and
2615             * set the page to PAGE_WRITE and did the TB invalidate for us.
2616             */
2617#ifdef TARGET_HAS_PRECISE_SMC
2618            TranslationBlock *current_tb = tcg_tb_lookup(pc);
2619            if (current_tb) {
2620                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
2621            }
2622#endif
2623        } else {
2624            host_start = address & qemu_host_page_mask;
2625            host_end = host_start + qemu_host_page_size;
2626
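                /*
                 * The host page may be larger than the target page, so
                 * every target page sharing this host page must be made
                 * writable (and its translated code invalidated) before
                 * the whole host page is mprotect()ed read-write again.
                 */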
2627            prot = 0;
2628            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2629                p = page_find(addr >> TARGET_PAGE_BITS);
2630                p->flags |= PAGE_WRITE;
2631                prot |= p->flags;
2632
2633                /* and since the content will be modified, we must invalidate
2634                   the corresponding translated code. */
2635                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2636#ifdef CONFIG_USER_ONLY
2637                if (DEBUG_TB_CHECK_GATE) {
2638                    tb_invalidate_check(addr);
2639                }
2640#endif
2641            }
2642            mprotect((void *)g2h(host_start), qemu_host_page_size,
2643                     prot & PAGE_BITS);
2644        }
2645        mmap_unlock();
2646        /* If current TB was invalidated return to main loop */
2647        return current_tb_invalidated ? 2 : 1;
2648    }
2649    mmap_unlock();
2650    return 0;
2651}
2652#endif /* CONFIG_USER_ONLY */
2653
2654/* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
2655void tcg_flush_softmmu_tlb(CPUState *cs)
2656{
2657#ifdef CONFIG_SOFTMMU
2658    tlb_flush(cs);
2659#endif
2660}
2661