qemu/accel/tcg/translate-all.c
   1/*
   2 *  Host code generation
   3 *
   4 *  Copyright (c) 2003 Fabrice Bellard
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19#ifdef _WIN32
  20#include <windows.h>
  21#endif
  22#include "qemu/osdep.h"
  23
  24
  25#include "qemu-common.h"
  26#define NO_CPU_IO_DEFS
  27#include "cpu.h"
  28#include "trace.h"
  29#include "disas/disas.h"
  30#include "exec/exec-all.h"
  31#include "tcg.h"
  32#if defined(CONFIG_USER_ONLY)
  33#include "qemu.h"
  34#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
  35#include <sys/param.h>
  36#if __FreeBSD_version >= 700104
  37#define HAVE_KINFO_GETVMMAP
  38#define sigqueue sigqueue_freebsd  /* avoid redefinition */
  39#include <sys/proc.h>
  40#include <machine/profile.h>
  41#define _KERNEL
  42#include <sys/user.h>
  43#undef _KERNEL
  44#undef sigqueue
  45#include <libutil.h>
  46#endif
  47#endif
  48#else
  49#include "exec/ram_addr.h"
  50#endif
  51
  52#include "exec/cputlb.h"
  53#include "exec/tb-hash.h"
  54#include "translate-all.h"
  55#include "qemu/bitmap.h"
  56#include "qemu/error-report.h"
  57#include "qemu/timer.h"
  58#include "qemu/main-loop.h"
  59#include "exec/log.h"
  60#include "sysemu/cpus.h"
  61
  62/* #define DEBUG_TB_INVALIDATE */
  63/* #define DEBUG_TB_FLUSH */
  64/* make various TB consistency checks */
  65/* #define DEBUG_TB_CHECK */
  66
  67#ifdef DEBUG_TB_INVALIDATE
  68#define DEBUG_TB_INVALIDATE_GATE 1
  69#else
  70#define DEBUG_TB_INVALIDATE_GATE 0
  71#endif
  72
  73#ifdef DEBUG_TB_FLUSH
  74#define DEBUG_TB_FLUSH_GATE 1
  75#else
  76#define DEBUG_TB_FLUSH_GATE 0
  77#endif
  78
  79#if !defined(CONFIG_USER_ONLY)
  80/* TB consistency checks only implemented for usermode emulation.  */
  81#undef DEBUG_TB_CHECK
  82#endif
  83
  84#ifdef DEBUG_TB_CHECK
  85#define DEBUG_TB_CHECK_GATE 1
  86#else
  87#define DEBUG_TB_CHECK_GATE 0
  88#endif
  89
   90/* Access to the various translation structures needs to be serialised via locks
   91 * for consistency.
   92 * In user-mode emulation, access to the memory-related structures is protected
   93 * with mmap_lock.
   94 * In !user-mode we use per-page locks.
   95 */
  96#ifdef CONFIG_SOFTMMU
  97#define assert_memory_lock()
  98#else
  99#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
 100#endif
 101
 102#define SMC_BITMAP_USE_THRESHOLD 10
 103
 104typedef struct PageDesc {
 105    /* list of TBs intersecting this ram page */
 106    uintptr_t first_tb;
 107#ifdef CONFIG_SOFTMMU
  108    /* in order to optimize self-modifying code, we count the number
  109       of lookups we do to a given page to decide when to use a bitmap */
 110    unsigned long *code_bitmap;
 111    unsigned int code_write_count;
 112#else
 113    unsigned long flags;
 114#endif
 115#ifndef CONFIG_USER_ONLY
 116    QemuSpin lock;
 117#endif
 118} PageDesc;
 119
 120/**
 121 * struct page_entry - page descriptor entry
 122 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 123 * @index:  page index of the page
 124 * @locked: whether the page is locked
 125 *
 126 * This struct helps us keep track of the locked state of a page, without
 127 * bloating &struct PageDesc.
 128 *
 129 * A page lock protects accesses to all fields of &struct PageDesc.
 130 *
 131 * See also: &struct page_collection.
 132 */
 133struct page_entry {
 134    PageDesc *pd;
 135    tb_page_addr_t index;
 136    bool locked;
 137};
 138
 139/**
 140 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 141 * @tree:   Binary search tree (BST) of the pages, with key == page index
 142 * @max:    Pointer to the page in @tree with the highest page index
 143 *
 144 * To avoid deadlock we lock pages in ascending order of page index.
 145 * When operating on a set of pages, we need to keep track of them so that
 146 * we can lock them in order and also unlock them later. For this we collect
 147 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 148 * @tree implementation we use does not provide an O(1) operation to obtain the
 149 * highest-ranked element, we use @max to keep track of the inserted page
 150 * with the highest index. This is valuable because if a page is not in
 151 * the tree and its index is higher than @max's, then we can lock it
 152 * without breaking the locking order rule.
 153 *
 154 * Note on naming: 'struct page_set' would be shorter, but we already have a few
 155 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
 156 *
 157 * See also: page_collection_lock().
 158 */
 159struct page_collection {
 160    GTree *tree;
 161    struct page_entry *max;
 162};
 163
 164/* list iterators for lists of tagged pointers in TranslationBlock */
 165#define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
 166    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
 167         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
 168             tb = (TranslationBlock *)((uintptr_t)tb & ~1))
 169
 170#define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
 171    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
 172
 173#define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
 174    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
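     /*
      * In these iterators the links are tagged pointers: the low bit of each
      * stored pointer records which of the pointed-to TB's two slots
      * (page_next[0]/[1] or jmp_list_next[0]/[1]) continues this particular
      * list.  That way a TB spanning two pages, or with two outgoing jumps,
      * can sit on two lists at once using one pointer-sized field per slot.
      */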
 175
 176/* In system mode we want L1_MAP to be based on ram offsets,
 177   while in user mode we want it to be based on virtual addresses.  */
 178#if !defined(CONFIG_USER_ONLY)
 179#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
 180# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
 181#else
 182# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
 183#endif
 184#else
 185# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
 186#endif
 187
 188/* Size of the L2 (and L3, etc) page tables.  */
 189#define V_L2_BITS 10
 190#define V_L2_SIZE (1 << V_L2_BITS)
 191
 192/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
 193QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
 194                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
 195                  * BITS_PER_BYTE);
 196
 197/*
 198 * L1 Mapping properties
 199 */
 200static int v_l1_size;
 201static int v_l1_shift;
 202static int v_l2_levels;
 203
 204/* The bottom level has pointers to PageDesc, and is indexed by
 205 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 206 */
 207#define V_L1_MIN_BITS 4
 208#define V_L1_MAX_BITS (V_L2_BITS + 3)
 209#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
 210
 211static void *l1_map[V_L1_MAX_SIZE];
 212
 213/* code generation context */
 214TCGContext tcg_init_ctx;
 215__thread TCGContext *tcg_ctx;
 216TBContext tb_ctx;
 217bool parallel_cpus;
 218
 219static void page_table_config_init(void)
 220{
 221    uint32_t v_l1_bits;
 222
 223    assert(TARGET_PAGE_BITS);
 224    /* The bits remaining after N lower levels of page tables.  */
 225    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
 226    if (v_l1_bits < V_L1_MIN_BITS) {
 227        v_l1_bits += V_L2_BITS;
 228    }
 229
 230    v_l1_size = 1 << v_l1_bits;
 231    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
 232    v_l2_levels = v_l1_shift / V_L2_BITS - 1;
 233
 234    assert(v_l1_bits <= V_L1_MAX_BITS);
 235    assert(v_l1_shift % V_L2_BITS == 0);
 236    assert(v_l2_levels >= 0);
 237}
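     /*
      * Example sizing: with 4 KiB target pages (TARGET_PAGE_BITS == 12) and a
      * 32-bit address space there are 20 page-index bits; 20 % V_L2_BITS == 0,
      * which is below V_L1_MIN_BITS, so v_l1_bits == 10, v_l1_size == 1024,
      * v_l1_shift == 10 and v_l2_levels == 0 (L1 entries point directly at
      * bottom-level PageDesc arrays).  With a 48-bit address space, 36 bits
      * remain, giving v_l1_bits == 6, v_l1_shift == 30 and v_l2_levels == 2.
      */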
 238
 239void cpu_gen_init(void)
 240{
 241    tcg_context_init(&tcg_init_ctx);
 242}
 243
 244/* Encode VAL as a signed leb128 sequence at P.
 245   Return P incremented past the encoded value.  */
 246static uint8_t *encode_sleb128(uint8_t *p, target_long val)
 247{
 248    int more, byte;
 249
 250    do {
 251        byte = val & 0x7f;
 252        val >>= 7;
 253        more = !((val == 0 && (byte & 0x40) == 0)
 254                 || (val == -1 && (byte & 0x40) != 0));
 255        if (more) {
 256            byte |= 0x80;
 257        }
 258        *p++ = byte;
 259    } while (more);
 260
 261    return p;
 262}
 263
 264/* Decode a signed leb128 sequence at *PP; increment *PP past the
 265   decoded value.  Return the decoded value.  */
 266static target_long decode_sleb128(uint8_t **pp)
 267{
 268    uint8_t *p = *pp;
 269    target_long val = 0;
 270    int byte, shift = 0;
 271
 272    do {
 273        byte = *p++;
 274        val |= (target_ulong)(byte & 0x7f) << shift;
 275        shift += 7;
 276    } while (byte & 0x80);
 277    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
 278        val |= -(target_ulong)1 << shift;
 279    }
 280
 281    *pp = p;
 282    return val;
 283}
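     /*
      * For reference: sleb128 stores seven data bits per byte, least
      * significant group first, with the top bit of each byte acting as a
      * continuation flag and bit 6 of the final byte carrying the sign.
      * E.g. 200 encodes as 0xc8 0x01, and -3 encodes as the single byte 0x7d.
      */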
 284
 285/* Encode the data collected about the instructions while compiling TB.
 286   Place the data at BLOCK, and return the number of bytes consumed.
 287
 288   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
 289   which come from the target's insn_start data, followed by a uintptr_t
 290   which comes from the host pc of the end of the code implementing the insn.
 291
 292   Each line of the table is encoded as sleb128 deltas from the previous
 293   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
 294   That is, the first column is seeded with the guest pc, the last column
 295   with the host pc, and the middle columns with zeros.  */
 296
 297static int encode_search(TranslationBlock *tb, uint8_t *block)
 298{
 299    uint8_t *highwater = tcg_ctx->code_gen_highwater;
 300    uint8_t *p = block;
 301    int i, j, n;
 302
 303    for (i = 0, n = tb->icount; i < n; ++i) {
 304        target_ulong prev;
 305
 306        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
 307            if (i == 0) {
 308                prev = (j == 0 ? tb->pc : 0);
 309            } else {
 310                prev = tcg_ctx->gen_insn_data[i - 1][j];
 311            }
 312            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
 313        }
 314        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
 315        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
 316
 317        /* Test for (pending) buffer overflow.  The assumption is that any
 318           one row beginning below the high water mark cannot overrun
 319           the buffer completely.  Thus we can test for overflow after
 320           encoding a row without having to check during encoding.  */
 321        if (unlikely(p > highwater)) {
 322            return -1;
 323        }
 324    }
 325
 326    return p - block;
 327}
 328
 329/* The cpu state corresponding to 'searched_pc' is restored.
  330 * When reset_icount is true, the current TB will be interrupted and
 331 * icount should be recalculated.
 332 */
 333static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
 334                                     uintptr_t searched_pc, bool reset_icount)
 335{
 336    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
 337    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
 338    CPUArchState *env = cpu->env_ptr;
 339    uint8_t *p = tb->tc.ptr + tb->tc.size;
 340    int i, j, num_insns = tb->icount;
 341#ifdef CONFIG_PROFILER
 342    TCGProfile *prof = &tcg_ctx->prof;
 343    int64_t ti = profile_getclock();
 344#endif
 345
 346    searched_pc -= GETPC_ADJ;
 347
 348    if (searched_pc < host_pc) {
 349        return -1;
 350    }
 351
 352    /* Reconstruct the stored insn data while looking for the point at
 353       which the end of the insn exceeds the searched_pc.  */
 354    for (i = 0; i < num_insns; ++i) {
 355        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
 356            data[j] += decode_sleb128(&p);
 357        }
 358        host_pc += decode_sleb128(&p);
 359        if (host_pc > searched_pc) {
 360            goto found;
 361        }
 362    }
 363    return -1;
 364
 365 found:
 366    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
 367        assert(use_icount);
  368        /* Reset the cycle counter to the start of the block
  369           and shift it to the number of actually executed instructions */
 370        cpu->icount_decr.u16.low += num_insns - i;
 371    }
 372    restore_state_to_opc(env, tb, data);
 373
 374#ifdef CONFIG_PROFILER
 375    atomic_set(&prof->restore_time,
 376                prof->restore_time + profile_getclock() - ti);
 377    atomic_set(&prof->restore_count, prof->restore_count + 1);
 378#endif
 379    return 0;
 380}
 381
 382bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
 383{
 384    TranslationBlock *tb;
 385    bool r = false;
 386    uintptr_t check_offset;
 387
 388    /* The host_pc has to be in the region of current code buffer. If
 389     * it is not we will not be able to resolve it here. The two cases
 390     * where host_pc will not be correct are:
 391     *
 392     *  - fault during translation (instruction fetch)
 393     *  - fault from helper (not using GETPC() macro)
 394     *
  395     * Either way we need to return early as we can't resolve it here.
  396     *
  397     * We are using unsigned arithmetic, so if host_pc <
  398     * tcg_init_ctx.code_gen_buffer, check_offset will wrap to way
  399     * above code_gen_buffer_size.
 400     */
 401    check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;
 402
 403    if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
 404        tb = tcg_tb_lookup(host_pc);
 405        if (tb) {
 406            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
 407            if (tb_cflags(tb) & CF_NOCACHE) {
 408                /* one-shot translation, invalidate it immediately */
 409                tb_phys_invalidate(tb, -1);
 410                tcg_tb_remove(tb);
 411            }
 412            r = true;
 413        }
 414    }
 415
 416    return r;
 417}
 418
 419static void page_init(void)
 420{
 421    page_size_init();
 422    page_table_config_init();
 423
 424#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
 425    {
 426#ifdef HAVE_KINFO_GETVMMAP
 427        struct kinfo_vmentry *freep;
 428        int i, cnt;
 429
 430        freep = kinfo_getvmmap(getpid(), &cnt);
 431        if (freep) {
 432            mmap_lock();
 433            for (i = 0; i < cnt; i++) {
 434                unsigned long startaddr, endaddr;
 435
 436                startaddr = freep[i].kve_start;
 437                endaddr = freep[i].kve_end;
 438                if (h2g_valid(startaddr)) {
 439                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
 440
 441                    if (h2g_valid(endaddr)) {
 442                        endaddr = h2g(endaddr);
 443                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
 444                    } else {
 445#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
 446                        endaddr = ~0ul;
 447                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
 448#endif
 449                    }
 450                }
 451            }
 452            free(freep);
 453            mmap_unlock();
 454        }
 455#else
 456        FILE *f;
 457
 458        last_brk = (unsigned long)sbrk(0);
 459
 460        f = fopen("/compat/linux/proc/self/maps", "r");
 461        if (f) {
 462            mmap_lock();
 463
 464            do {
 465                unsigned long startaddr, endaddr;
 466                int n;
 467
 468                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
 469
 470                if (n == 2 && h2g_valid(startaddr)) {
 471                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
 472
 473                    if (h2g_valid(endaddr)) {
 474                        endaddr = h2g(endaddr);
 475                    } else {
 476                        endaddr = ~0ul;
 477                    }
 478                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
 479                }
 480            } while (!feof(f));
 481
 482            fclose(f);
 483            mmap_unlock();
 484        }
 485#endif
 486    }
 487#endif
 488}
 489
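     /*
      * Look up (and optionally create) the PageDesc for a page index.  The
      * multi-level table is populated lazily and walked without locks:
      * readers use atomic_rcu_read(), writers install new levels with
      * atomic_cmpxchg() and free their allocation if another thread won the
      * race.
      */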
 490static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
 491{
 492    PageDesc *pd;
 493    void **lp;
 494    int i;
 495
 496    /* Level 1.  Always allocated.  */
 497    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
 498
 499    /* Level 2..N-1.  */
 500    for (i = v_l2_levels; i > 0; i--) {
 501        void **p = atomic_rcu_read(lp);
 502
 503        if (p == NULL) {
 504            void *existing;
 505
 506            if (!alloc) {
 507                return NULL;
 508            }
 509            p = g_new0(void *, V_L2_SIZE);
 510            existing = atomic_cmpxchg(lp, NULL, p);
 511            if (unlikely(existing)) {
 512                g_free(p);
 513                p = existing;
 514            }
 515        }
 516
 517        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
 518    }
 519
 520    pd = atomic_rcu_read(lp);
 521    if (pd == NULL) {
 522        void *existing;
 523
 524        if (!alloc) {
 525            return NULL;
 526        }
 527        pd = g_new0(PageDesc, V_L2_SIZE);
 528#ifndef CONFIG_USER_ONLY
 529        {
 530            int i;
 531
 532            for (i = 0; i < V_L2_SIZE; i++) {
 533                qemu_spin_init(&pd[i].lock);
 534            }
 535        }
 536#endif
 537        existing = atomic_cmpxchg(lp, NULL, pd);
 538        if (unlikely(existing)) {
 539            g_free(pd);
 540            pd = existing;
 541        }
 542    }
 543
 544    return pd + (index & (V_L2_SIZE - 1));
 545}
 546
 547static inline PageDesc *page_find(tb_page_addr_t index)
 548{
 549    return page_find_alloc(index, 0);
 550}
 551
 552static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
 553                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
 554
 555/* In user-mode page locks aren't used; mmap_lock is enough */
 556#ifdef CONFIG_USER_ONLY
 557
 558#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
 559
 560static inline void page_lock(PageDesc *pd)
 561{ }
 562
 563static inline void page_unlock(PageDesc *pd)
 564{ }
 565
 566static inline void page_lock_tb(const TranslationBlock *tb)
 567{ }
 568
 569static inline void page_unlock_tb(const TranslationBlock *tb)
 570{ }
 571
 572struct page_collection *
 573page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
 574{
 575    return NULL;
 576}
 577
 578void page_collection_unlock(struct page_collection *set)
 579{ }
 580#else /* !CONFIG_USER_ONLY */
 581
 582#ifdef CONFIG_DEBUG_TCG
 583
 584static __thread GHashTable *ht_pages_locked_debug;
 585
 586static void ht_pages_locked_debug_init(void)
 587{
 588    if (ht_pages_locked_debug) {
 589        return;
 590    }
 591    ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
 592}
 593
 594static bool page_is_locked(const PageDesc *pd)
 595{
 596    PageDesc *found;
 597
 598    ht_pages_locked_debug_init();
 599    found = g_hash_table_lookup(ht_pages_locked_debug, pd);
 600    return !!found;
 601}
 602
 603static void page_lock__debug(PageDesc *pd)
 604{
 605    ht_pages_locked_debug_init();
 606    g_assert(!page_is_locked(pd));
 607    g_hash_table_insert(ht_pages_locked_debug, pd, pd);
 608}
 609
 610static void page_unlock__debug(const PageDesc *pd)
 611{
 612    bool removed;
 613
 614    ht_pages_locked_debug_init();
 615    g_assert(page_is_locked(pd));
 616    removed = g_hash_table_remove(ht_pages_locked_debug, pd);
 617    g_assert(removed);
 618}
 619
 620static void
 621do_assert_page_locked(const PageDesc *pd, const char *file, int line)
 622{
 623    if (unlikely(!page_is_locked(pd))) {
 624        error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
 625                     pd, file, line);
 626        abort();
 627    }
 628}
 629
 630#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
 631
 632void assert_no_pages_locked(void)
 633{
 634    ht_pages_locked_debug_init();
 635    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
 636}
 637
 638#else /* !CONFIG_DEBUG_TCG */
 639
 640#define assert_page_locked(pd)
 641
 642static inline void page_lock__debug(const PageDesc *pd)
 643{
 644}
 645
 646static inline void page_unlock__debug(const PageDesc *pd)
 647{
 648}
 649
 650#endif /* CONFIG_DEBUG_TCG */
 651
 652static inline void page_lock(PageDesc *pd)
 653{
 654    page_lock__debug(pd);
 655    qemu_spin_lock(&pd->lock);
 656}
 657
 658static inline void page_unlock(PageDesc *pd)
 659{
 660    qemu_spin_unlock(&pd->lock);
 661    page_unlock__debug(pd);
 662}
 663
 664/* lock the page(s) of a TB in the correct acquisition order */
 665static inline void page_lock_tb(const TranslationBlock *tb)
 666{
 667    page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
 668}
 669
 670static inline void page_unlock_tb(const TranslationBlock *tb)
 671{
 672    PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
 673
 674    page_unlock(p1);
 675    if (unlikely(tb->page_addr[1] != -1)) {
 676        PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
 677
 678        if (p2 != p1) {
 679            page_unlock(p2);
 680        }
 681    }
 682}
 683
 684static inline struct page_entry *
 685page_entry_new(PageDesc *pd, tb_page_addr_t index)
 686{
 687    struct page_entry *pe = g_malloc(sizeof(*pe));
 688
 689    pe->index = index;
 690    pe->pd = pd;
 691    pe->locked = false;
 692    return pe;
 693}
 694
 695static void page_entry_destroy(gpointer p)
 696{
 697    struct page_entry *pe = p;
 698
 699    g_assert(pe->locked);
 700    page_unlock(pe->pd);
 701    g_free(pe);
 702}
 703
  704/* returns false on success, i.e. the page lock was acquired */
 705static bool page_entry_trylock(struct page_entry *pe)
 706{
 707    bool busy;
 708
 709    busy = qemu_spin_trylock(&pe->pd->lock);
 710    if (!busy) {
 711        g_assert(!pe->locked);
 712        pe->locked = true;
 713        page_lock__debug(pe->pd);
 714    }
 715    return busy;
 716}
 717
 718static void do_page_entry_lock(struct page_entry *pe)
 719{
 720    page_lock(pe->pd);
 721    g_assert(!pe->locked);
 722    pe->locked = true;
 723}
 724
 725static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
 726{
 727    struct page_entry *pe = value;
 728
 729    do_page_entry_lock(pe);
 730    return FALSE;
 731}
 732
 733static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
 734{
 735    struct page_entry *pe = value;
 736
 737    if (pe->locked) {
 738        pe->locked = false;
 739        page_unlock(pe->pd);
 740    }
 741    return FALSE;
 742}
 743
 744/*
 745 * Trylock a page, and if successful, add the page to a collection.
 746 * Returns true ("busy") if the page could not be locked; false otherwise.
 747 */
 748static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
 749{
 750    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
 751    struct page_entry *pe;
 752    PageDesc *pd;
 753
 754    pe = g_tree_lookup(set->tree, &index);
 755    if (pe) {
 756        return false;
 757    }
 758
 759    pd = page_find(index);
 760    if (pd == NULL) {
 761        return false;
 762    }
 763
 764    pe = page_entry_new(pd, index);
 765    g_tree_insert(set->tree, &pe->index, pe);
 766
 767    /*
 768     * If this is either (1) the first insertion or (2) a page whose index
 769     * is higher than any other so far, just lock the page and move on.
 770     */
 771    if (set->max == NULL || pe->index > set->max->index) {
 772        set->max = pe;
 773        do_page_entry_lock(pe);
 774        return false;
 775    }
 776    /*
 777     * Try to acquire out-of-order lock; if busy, return busy so that we acquire
 778     * locks in order.
 779     */
 780    return page_entry_trylock(pe);
 781}
 782
 783static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
 784{
 785    tb_page_addr_t a = *(const tb_page_addr_t *)ap;
 786    tb_page_addr_t b = *(const tb_page_addr_t *)bp;
 787
 788    if (a == b) {
 789        return 0;
 790    } else if (a < b) {
 791        return -1;
 792    }
 793    return 1;
 794}
 795
 796/*
 797 * Lock a range of pages ([@start,@end[) as well as the pages of all
 798 * intersecting TBs.
 799 * Locking order: acquire locks in ascending order of page index.
 800 */
 801struct page_collection *
 802page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
 803{
 804    struct page_collection *set = g_malloc(sizeof(*set));
 805    tb_page_addr_t index;
 806    PageDesc *pd;
 807
 808    start >>= TARGET_PAGE_BITS;
 809    end   >>= TARGET_PAGE_BITS;
 810    g_assert(start <= end);
 811
 812    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
 813                                page_entry_destroy);
 814    set->max = NULL;
 815    assert_no_pages_locked();
 816
 817 retry:
 818    g_tree_foreach(set->tree, page_entry_lock, NULL);
 819
 820    for (index = start; index <= end; index++) {
 821        TranslationBlock *tb;
 822        int n;
 823
 824        pd = page_find(index);
 825        if (pd == NULL) {
 826            continue;
 827        }
 828        if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
 829            g_tree_foreach(set->tree, page_entry_unlock, NULL);
 830            goto retry;
 831        }
 832        assert_page_locked(pd);
 833        PAGE_FOR_EACH_TB(pd, tb, n) {
 834            if (page_trylock_add(set, tb->page_addr[0]) ||
 835                (tb->page_addr[1] != -1 &&
 836                 page_trylock_add(set, tb->page_addr[1]))) {
 837                /* drop all locks, and reacquire in order */
 838                g_tree_foreach(set->tree, page_entry_unlock, NULL);
 839                goto retry;
 840            }
 841        }
 842    }
 843    return set;
 844}
 845
 846void page_collection_unlock(struct page_collection *set)
 847{
 848    /* entries are unlocked and freed via page_entry_destroy */
 849    g_tree_destroy(set->tree);
 850    g_free(set);
 851}
 852
 853#endif /* !CONFIG_USER_ONLY */
 854
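     /*
      * Find (optionally allocating) and lock the PageDesc(s) for a TB's one
      * or two physical pages.  phys2 == -1 means the TB fits in a single
      * page; otherwise the lower-indexed page is always locked first so the
      * global lock ordering is respected.
      */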
 855static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
 856                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
 857{
 858    PageDesc *p1, *p2;
 859    tb_page_addr_t page1;
 860    tb_page_addr_t page2;
 861
 862    assert_memory_lock();
 863    g_assert(phys1 != -1);
 864
 865    page1 = phys1 >> TARGET_PAGE_BITS;
 866    page2 = phys2 >> TARGET_PAGE_BITS;
 867
 868    p1 = page_find_alloc(page1, alloc);
 869    if (ret_p1) {
 870        *ret_p1 = p1;
 871    }
 872    if (likely(phys2 == -1)) {
 873        page_lock(p1);
 874        return;
 875    } else if (page1 == page2) {
 876        page_lock(p1);
 877        if (ret_p2) {
 878            *ret_p2 = p1;
 879        }
 880        return;
 881    }
 882    p2 = page_find_alloc(page2, alloc);
 883    if (ret_p2) {
 884        *ret_p2 = p2;
 885    }
 886    if (page1 < page2) {
 887        page_lock(p1);
 888        page_lock(p2);
 889    } else {
 890        page_lock(p2);
 891        page_lock(p1);
 892    }
 893}
 894
 895#if defined(CONFIG_USER_ONLY)
  896/* Currently it is not recommended to allocate big chunks of data in
  897   user mode. This will change when a dedicated libc is used.  */
 898/* ??? 64-bit hosts ought to have no problem mmaping data outside the
 899   region in which the guest needs to run.  Revisit this.  */
 900#define USE_STATIC_CODE_GEN_BUFFER
 901#endif
 902
  903/* Minimum size of the code gen buffer.  This number is arbitrarily chosen,
  904   but not so small that we can't have a fair number of TBs live.  */
 905#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)
 906
 907/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
 908   indicated, this is constrained by the range of direct branches on the
 909   host cpu, as used by the TCG implementation of goto_tb.  */
 910#if defined(__x86_64__)
 911# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
 912#elif defined(__sparc__)
 913# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
 914#elif defined(__powerpc64__)
 915# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
 916#elif defined(__powerpc__)
 917# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
 918#elif defined(__aarch64__)
 919# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
 920#elif defined(__s390x__)
 921  /* We have a +- 4GB range on the branches; leave some slop.  */
 922# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
 923#elif defined(__mips__)
 924  /* We have a 256MB branch region, but leave room to make sure the
 925     main executable is also within that region.  */
 926# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
 927#else
 928# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
 929#endif
 930
 931#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
 932
 933#define DEFAULT_CODE_GEN_BUFFER_SIZE \
 934  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
 935   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
 936
 937static inline size_t size_code_gen_buffer(size_t tb_size)
 938{
 939    /* Size the buffer.  */
 940    if (tb_size == 0) {
 941#ifdef USE_STATIC_CODE_GEN_BUFFER
 942        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
 943#else
 944        /* ??? Needs adjustments.  */
 945        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
 946           static buffer, we could size this on RESERVED_VA, on the text
 947           segment size of the executable, or continue to use the default.  */
 948        tb_size = (unsigned long)(ram_size / 4);
 949#endif
 950    }
 951    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
 952        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
 953    }
 954    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
 955        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
 956    }
 957    return tb_size;
 958}
 959
 960#ifdef __mips__
 961/* In order to use J and JAL within the code_gen_buffer, we require
 962   that the buffer not cross a 256MB boundary.  */
 963static inline bool cross_256mb(void *addr, size_t size)
 964{
 965    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
 966}
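     /* Example: a buffer at 0x0ff00000 with size 0x200000 ends at 0x10100000;
        bit 28 differs between start and end, so the buffer would span a 256MB
        boundary that J/JAL cannot jump across. */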
 967
 968/* We weren't able to allocate a buffer without crossing that boundary,
 969   so make do with the larger portion of the buffer that doesn't cross.
 970   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
 971static inline void *split_cross_256mb(void *buf1, size_t size1)
 972{
 973    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
 974    size_t size2 = buf1 + size1 - buf2;
 975
 976    size1 = buf2 - buf1;
 977    if (size1 < size2) {
 978        size1 = size2;
 979        buf1 = buf2;
 980    }
 981
 982    tcg_ctx->code_gen_buffer_size = size1;
 983    return buf1;
 984}
 985#endif
 986
 987#ifdef USE_STATIC_CODE_GEN_BUFFER
 988static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
 989    __attribute__((aligned(CODE_GEN_ALIGN)));
 990
 991static inline void *alloc_code_gen_buffer(void)
 992{
 993    void *buf = static_code_gen_buffer;
 994    void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
 995    size_t size;
 996
 997    /* page-align the beginning and end of the buffer */
 998    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
 999    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
1000
1001    size = end - buf;
1002
1003    /* Honor a command-line option limiting the size of the buffer.  */
1004    if (size > tcg_ctx->code_gen_buffer_size) {
1005        size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
1006                               qemu_real_host_page_size);
1007    }
1008    tcg_ctx->code_gen_buffer_size = size;
1009
1010#ifdef __mips__
1011    if (cross_256mb(buf, size)) {
1012        buf = split_cross_256mb(buf, size);
1013        size = tcg_ctx->code_gen_buffer_size;
1014    }
1015#endif
1016
1017    if (qemu_mprotect_rwx(buf, size)) {
1018        abort();
1019    }
1020    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1021
1022    return buf;
1023}
1024#elif defined(_WIN32)
1025static inline void *alloc_code_gen_buffer(void)
1026{
1027    size_t size = tcg_ctx->code_gen_buffer_size;
1028    return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
1029                        PAGE_EXECUTE_READWRITE);
1030}
1031#else
1032static inline void *alloc_code_gen_buffer(void)
1033{
1034    int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
1035    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
1036    uintptr_t start = 0;
1037    size_t size = tcg_ctx->code_gen_buffer_size;
1038    void *buf;
1039
1040    /* Constrain the position of the buffer based on the host cpu.
1041       Note that these addresses are chosen in concert with the
1042       addresses assigned in the relevant linker script file.  */
1043# if defined(__PIE__) || defined(__PIC__)
1044    /* Don't bother setting a preferred location if we're building
1045       a position-independent executable.  We're more likely to get
1046       an address near the main executable if we let the kernel
1047       choose the address.  */
1048# elif defined(__x86_64__) && defined(MAP_32BIT)
1049    /* Force the memory down into low memory with the executable.
1050       Leave the choice of exact location with the kernel.  */
1051    flags |= MAP_32BIT;
1052    /* Cannot expect to map more than 800MB in low memory.  */
1053    if (size > 800u * 1024 * 1024) {
1054        tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
1055    }
1056# elif defined(__sparc__)
1057    start = 0x40000000ul;
1058# elif defined(__s390x__)
1059    start = 0x90000000ul;
1060# elif defined(__mips__)
1061#  if _MIPS_SIM == _ABI64
1062    start = 0x128000000ul;
1063#  else
1064    start = 0x08000000ul;
1065#  endif
1066# endif
1067
1068    buf = mmap((void *)start, size, prot, flags, -1, 0);
1069    if (buf == MAP_FAILED) {
1070        return NULL;
1071    }
1072
1073#ifdef __mips__
1074    if (cross_256mb(buf, size)) {
1075        /* Try again, with the original still mapped, to avoid re-acquiring
1076           that 256mb crossing.  This time don't specify an address.  */
1077        size_t size2;
1078        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
1079        switch ((int)(buf2 != MAP_FAILED)) {
1080        case 1:
1081            if (!cross_256mb(buf2, size)) {
1082                /* Success!  Use the new buffer.  */
1083                munmap(buf, size);
1084                break;
1085            }
1086            /* Failure.  Work with what we had.  */
1087            munmap(buf2, size);
1088            /* fallthru */
1089        default:
1090            /* Split the original buffer.  Free the smaller half.  */
1091            buf2 = split_cross_256mb(buf, size);
1092            size2 = tcg_ctx->code_gen_buffer_size;
1093            if (buf == buf2) {
1094                munmap(buf + size2, size - size2);
1095            } else {
1096                munmap(buf, size - size2);
1097            }
1098            size = size2;
1099            break;
1100        }
1101        buf = buf2;
1102    }
1103#endif
1104
1105    /* Request large pages for the buffer.  */
1106    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1107
1108    return buf;
1109}
1110#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
1111
1112static inline void code_gen_alloc(size_t tb_size)
1113{
1114    tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
1115    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
1116    if (tcg_ctx->code_gen_buffer == NULL) {
1117        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
1118        exit(1);
1119    }
1120}
1121
1122static bool tb_cmp(const void *ap, const void *bp)
1123{
1124    const TranslationBlock *a = ap;
1125    const TranslationBlock *b = bp;
1126
1127    return a->pc == b->pc &&
1128        a->cs_base == b->cs_base &&
1129        a->flags == b->flags &&
1130        (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
1131        a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
1132        a->page_addr[0] == b->page_addr[0] &&
1133        a->page_addr[1] == b->page_addr[1];
1134}
1135
1136static void tb_htable_init(void)
1137{
1138    unsigned int mode = QHT_MODE_AUTO_RESIZE;
1139
1140    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
1141}
1142
1143/* Must be called before using the QEMU cpus. 'tb_size' is the size
1144   (in bytes) allocated to the translation buffer. Zero means default
1145   size. */
1146void tcg_exec_init(unsigned long tb_size)
1147{
1148    tcg_allowed = true;
1149    cpu_gen_init();
1150    page_init();
1151    tb_htable_init();
1152    code_gen_alloc(tb_size);
1153#if defined(CONFIG_SOFTMMU)
1154    /* There's no guest base to take into account, so go ahead and
1155       initialize the prologue now.  */
1156    tcg_prologue_init(tcg_ctx);
1157#endif
1158}
1159
 1160/*
 1161 * Allocate a new translation block.  Returns NULL when the buffer is full
 1162 * (too many TBs or too much generated code); the caller then flushes it.
 1163 */
1164static TranslationBlock *tb_alloc(target_ulong pc)
1165{
1166    TranslationBlock *tb;
1167
1168    assert_memory_lock();
1169
1170    tb = tcg_tb_alloc(tcg_ctx);
1171    if (unlikely(tb == NULL)) {
1172        return NULL;
1173    }
1174    return tb;
1175}
1176
1177/* call with @p->lock held */
1178static inline void invalidate_page_bitmap(PageDesc *p)
1179{
1180    assert_page_locked(p);
1181#ifdef CONFIG_SOFTMMU
1182    g_free(p->code_bitmap);
1183    p->code_bitmap = NULL;
1184    p->code_write_count = 0;
1185#endif
1186}
1187
1188/* Set to NULL all the 'first_tb' fields in all PageDescs. */
1189static void page_flush_tb_1(int level, void **lp)
1190{
1191    int i;
1192
1193    if (*lp == NULL) {
1194        return;
1195    }
1196    if (level == 0) {
1197        PageDesc *pd = *lp;
1198
1199        for (i = 0; i < V_L2_SIZE; ++i) {
1200            page_lock(&pd[i]);
1201            pd[i].first_tb = (uintptr_t)NULL;
1202            invalidate_page_bitmap(pd + i);
1203            page_unlock(&pd[i]);
1204        }
1205    } else {
1206        void **pp = *lp;
1207
1208        for (i = 0; i < V_L2_SIZE; ++i) {
1209            page_flush_tb_1(level - 1, pp + i);
1210        }
1211    }
1212}
1213
1214static void page_flush_tb(void)
1215{
1216    int i, l1_sz = v_l1_size;
1217
1218    for (i = 0; i < l1_sz; i++) {
1219        page_flush_tb_1(v_l2_levels, l1_map + i);
1220    }
1221}
1222
1223static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
1224{
1225    const TranslationBlock *tb = value;
1226    size_t *size = data;
1227
1228    *size += tb->tc.size;
1229    return false;
1230}
1231
1232/* flush all the translation blocks */
1233static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
1234{
1235    mmap_lock();
 1236    /* If it has already been done on request of another CPU,
 1237     * just retry.
 1238     */
1239    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
1240        goto done;
1241    }
1242
1243    if (DEBUG_TB_FLUSH_GATE) {
1244        size_t nb_tbs = tcg_nb_tbs();
1245        size_t host_size = 0;
1246
1247        tcg_tb_foreach(tb_host_size_iter, &host_size);
1248        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
1249               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
1250    }
1251
1252    CPU_FOREACH(cpu) {
1253        cpu_tb_jmp_cache_clear(cpu);
1254    }
1255
1256    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
1257    page_flush_tb();
1258
1259    tcg_region_reset_all();
1260    /* XXX: flush processor icache at this point if cache flush is
1261       expensive */
1262    atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
1263
1264done:
1265    mmap_unlock();
1266}
1267
1268void tb_flush(CPUState *cpu)
1269{
1270    if (tcg_enabled()) {
1271        unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
1272        async_safe_run_on_cpu(cpu, do_tb_flush,
1273                              RUN_ON_CPU_HOST_INT(tb_flush_count));
1274    }
1275}
1276
1277/*
1278 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1279 * so in order to prevent bit rot we compile them unconditionally in user-mode,
1280 * and let the optimizer get rid of them by wrapping their user-only callers
1281 * with if (DEBUG_TB_CHECK_GATE).
1282 */
1283#ifdef CONFIG_USER_ONLY
1284
1285static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
1286{
1287    TranslationBlock *tb = p;
1288    target_ulong addr = *(target_ulong *)userp;
1289
1290    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
1291        printf("ERROR invalidate: address=" TARGET_FMT_lx
1292               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
1293    }
1294}
1295
 1296/* verify that no translated block still intersects the given guest page
 1297 *
 1298 * Called with mmap_lock held.
 1299 */
1300static void tb_invalidate_check(target_ulong address)
1301{
1302    address &= TARGET_PAGE_MASK;
1303    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
1304}
1305
1306static void do_tb_page_check(void *p, uint32_t hash, void *userp)
1307{
1308    TranslationBlock *tb = p;
1309    int flags1, flags2;
1310
1311    flags1 = page_get_flags(tb->pc);
1312    flags2 = page_get_flags(tb->pc + tb->size - 1);
1313    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
1314        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1315               (long)tb->pc, tb->size, flags1, flags2);
1316    }
1317}
1318
1319/* verify that all the pages have correct rights for code */
1320static void tb_page_check(void)
1321{
1322    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
1323}
1324
1325#endif /* CONFIG_USER_ONLY */
1326
1327/*
1328 * user-mode: call with mmap_lock held
1329 * !user-mode: call with @pd->lock held
1330 */
1331static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
1332{
1333    TranslationBlock *tb1;
1334    uintptr_t *pprev;
1335    unsigned int n1;
1336
1337    assert_page_locked(pd);
1338    pprev = &pd->first_tb;
1339    PAGE_FOR_EACH_TB(pd, tb1, n1) {
1340        if (tb1 == tb) {
1341            *pprev = tb1->page_next[n1];
1342            return;
1343        }
1344        pprev = &tb1->page_next[n1];
1345    }
1346    g_assert_not_reached();
1347}
1348
 1349/* remove @orig's @n_orig-th outgoing jump from its destination's jump list */
1350static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
1351{
1352    uintptr_t ptr, ptr_locked;
1353    TranslationBlock *dest;
1354    TranslationBlock *tb;
1355    uintptr_t *pprev;
1356    int n;
1357
1358    /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1359    ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1);
1360    dest = (TranslationBlock *)(ptr & ~1);
1361    if (dest == NULL) {
1362        return;
1363    }
1364
1365    qemu_spin_lock(&dest->jmp_lock);
1366    /*
1367     * While acquiring the lock, the jump might have been removed if the
1368     * destination TB was invalidated; check again.
1369     */
1370    ptr_locked = atomic_read(&orig->jmp_dest[n_orig]);
1371    if (ptr_locked != ptr) {
1372        qemu_spin_unlock(&dest->jmp_lock);
1373        /*
1374         * The only possibility is that the jump was unlinked via
1375         * tb_jump_unlink(dest). Seeing here another destination would be a bug,
1376         * because we set the LSB above.
1377         */
1378        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
1379        return;
1380    }
1381    /*
1382     * We first acquired the lock, and since the destination pointer matches,
1383     * we know for sure that @orig is in the jmp list.
1384     */
1385    pprev = &dest->jmp_list_head;
1386    TB_FOR_EACH_JMP(dest, tb, n) {
1387        if (tb == orig && n == n_orig) {
1388            *pprev = tb->jmp_list_next[n];
1389            /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1390            qemu_spin_unlock(&dest->jmp_lock);
1391            return;
1392        }
1393        pprev = &tb->jmp_list_next[n];
1394    }
1395    g_assert_not_reached();
1396}
1397
1398/* reset the jump entry 'n' of a TB so that it is not chained to
1399   another TB */
1400static inline void tb_reset_jump(TranslationBlock *tb, int n)
1401{
1402    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1403    tb_set_jmp_target(tb, n, addr);
1404}
1405
1406/* remove any jumps to the TB */
1407static inline void tb_jmp_unlink(TranslationBlock *dest)
1408{
1409    TranslationBlock *tb;
1410    int n;
1411
1412    qemu_spin_lock(&dest->jmp_lock);
1413
1414    TB_FOR_EACH_JMP(dest, tb, n) {
1415        tb_reset_jump(tb, n);
1416        atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
1417        /* No need to clear the list entry; setting the dest ptr is enough */
1418    }
1419    dest->jmp_list_head = (uintptr_t)NULL;
1420
1421    qemu_spin_unlock(&dest->jmp_lock);
1422}
1423
1424/*
1425 * In user-mode, call with mmap_lock held.
1426 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1427 * locks held.
1428 */
1429static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
1430{
1431    CPUState *cpu;
1432    PageDesc *p;
1433    uint32_t h;
1434    tb_page_addr_t phys_pc;
1435
1436    assert_memory_lock();
1437
1438    /* make sure no further incoming jumps will be chained to this TB */
1439    qemu_spin_lock(&tb->jmp_lock);
1440    atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1441    qemu_spin_unlock(&tb->jmp_lock);
1442
1443    /* remove the TB from the hash list */
1444    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1445    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
1446                     tb->trace_vcpu_dstate);
1447    if (!(tb->cflags & CF_NOCACHE) &&
1448        !qht_remove(&tb_ctx.htable, tb, h)) {
1449        return;
1450    }
1451
1452    /* remove the TB from the page list */
1453    if (rm_from_page_list) {
1454        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1455        tb_page_remove(p, tb);
1456        invalidate_page_bitmap(p);
1457        if (tb->page_addr[1] != -1) {
1458            p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1459            tb_page_remove(p, tb);
1460            invalidate_page_bitmap(p);
1461        }
1462    }
1463
 1464    /* remove the TB from each CPU's tb_jmp_cache */
1465    h = tb_jmp_cache_hash_func(tb->pc);
1466    CPU_FOREACH(cpu) {
1467        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1468            atomic_set(&cpu->tb_jmp_cache[h], NULL);
1469        }
1470    }
1471
 1472    /* remove this TB from the two jump lists */
1473    tb_remove_from_jmp_list(tb, 0);
1474    tb_remove_from_jmp_list(tb, 1);
1475
 1476    /* remove any remaining jumps to this TB */
1477    tb_jmp_unlink(tb);
1478
1479    atomic_set(&tcg_ctx->tb_phys_invalidate_count,
1480               tcg_ctx->tb_phys_invalidate_count + 1);
1481}
1482
1483static void tb_phys_invalidate__locked(TranslationBlock *tb)
1484{
1485    do_tb_phys_invalidate(tb, true);
1486}
1487
1488/* invalidate one TB
1489 *
1490 * Called with mmap_lock held in user-mode.
1491 */
1492void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1493{
1494    if (page_addr == -1 && tb->page_addr[0] != -1) {
1495        page_lock_tb(tb);
1496        do_tb_phys_invalidate(tb, true);
1497        page_unlock_tb(tb);
1498    } else {
1499        do_tb_phys_invalidate(tb, false);
1500    }
1501}
1502
1503#ifdef CONFIG_SOFTMMU
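     /*
      * Build a bitmap of which bytes of this guest page are covered by
      * translated code, so that a write which does not hit any TB can skip
      * the invalidation path.
      */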
1504/* call with @p->lock held */
1505static void build_page_bitmap(PageDesc *p)
1506{
1507    int n, tb_start, tb_end;
1508    TranslationBlock *tb;
1509
1510    assert_page_locked(p);
1511    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1512
1513    PAGE_FOR_EACH_TB(p, tb, n) {
1514        /* NOTE: this is subtle as a TB may span two physical pages */
1515        if (n == 0) {
1516            /* NOTE: tb_end may be after the end of the page, but
1517               it is not a problem */
1518            tb_start = tb->pc & ~TARGET_PAGE_MASK;
1519            tb_end = tb_start + tb->size;
1520            if (tb_end > TARGET_PAGE_SIZE) {
1521                tb_end = TARGET_PAGE_SIZE;
 1522            }
1523        } else {
1524            tb_start = 0;
1525            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1526        }
1527        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1528    }
1529}
1530#endif
1531
 1532/* add the tb to the target page and protect it if necessary
1533 *
1534 * Called with mmap_lock held for user-mode emulation.
1535 * Called with @p->lock held in !user-mode.
1536 */
1537static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
1538                               unsigned int n, tb_page_addr_t page_addr)
1539{
1540#ifndef CONFIG_USER_ONLY
1541    bool page_already_protected;
1542#endif
1543
1544    assert_page_locked(p);
1545
1546    tb->page_addr[n] = page_addr;
1547    tb->page_next[n] = p->first_tb;
1548#ifndef CONFIG_USER_ONLY
1549    page_already_protected = p->first_tb != (uintptr_t)NULL;
1550#endif
1551    p->first_tb = (uintptr_t)tb | n;
1552    invalidate_page_bitmap(p);
1553
1554#if defined(CONFIG_USER_ONLY)
1555    if (p->flags & PAGE_WRITE) {
1556        target_ulong addr;
1557        PageDesc *p2;
1558        int prot;
1559
 1560        /* force the host page to be non-writable (writes will have a
 1561           page fault + mprotect overhead) */
1562        page_addr &= qemu_host_page_mask;
1563        prot = 0;
1564        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1565            addr += TARGET_PAGE_SIZE) {
1566
1567            p2 = page_find(addr >> TARGET_PAGE_BITS);
1568            if (!p2) {
1569                continue;
1570            }
1571            prot |= p2->flags;
1572            p2->flags &= ~PAGE_WRITE;
 1573        }
1574        mprotect(g2h(page_addr), qemu_host_page_size,
1575                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1576        if (DEBUG_TB_INVALIDATE_GATE) {
1577            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1578        }
1579    }
1580#else
1581    /* if some code is already present, then the pages are already
1582       protected. So we handle the case where only the first TB is
1583       allocated in a physical page */
1584    if (!page_already_protected) {
1585        tlb_protect_code(page_addr);
1586    }
1587#endif
1588}
1589
1590/* add a new TB and link it to the physical page tables. phys_page2 is
1591 * (-1) to indicate that only one page contains the TB.
1592 *
1593 * Called with mmap_lock held for user-mode emulation.
1594 *
1595 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
1596 * Note that in !user-mode, another thread might have already added a TB
1597 * for the same block of guest code that @tb corresponds to. In that case,
1598 * the caller should discard the original @tb, and use instead the returned TB.
1599 */
1600static TranslationBlock *
1601tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1602             tb_page_addr_t phys_page2)
1603{
1604    PageDesc *p;
1605    PageDesc *p2 = NULL;
1606
1607    assert_memory_lock();
1608
1609    if (phys_pc == -1) {
1610        /*
1611         * If the TB is not associated with a physical RAM page then
1612         * it must be a temporary one-insn TB, and we have nothing to do
1613         * except fill in the page_addr[] fields.
1614         */
1615        assert(tb->cflags & CF_NOCACHE);
1616        tb->page_addr[0] = tb->page_addr[1] = -1;
1617        return tb;
1618    }
1619
1620    /*
 1621     * Add the TB to the page list, acquiring the pages' locks first.
1622     * We keep the locks held until after inserting the TB in the hash table,
1623     * so that if the insertion fails we know for sure that the TBs are still
1624     * in the page descriptors.
1625     * Note that inserting into the hash table first isn't an option, since
1626     * we can only insert TBs that are fully initialized.
1627     */
1628    page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
1629    tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
1630    if (p2) {
1631        tb_page_add(p2, tb, 1, phys_page2);
1632    } else {
1633        tb->page_addr[1] = -1;
1634    }
1635
1636    if (!(tb->cflags & CF_NOCACHE)) {
1637        void *existing_tb = NULL;
1638        uint32_t h;
1639
1640        /* add in the hash table */
1641        h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1642                         tb->trace_vcpu_dstate);
1643        qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
1644
1645        /* remove TB from the page(s) if we couldn't insert it */
1646        if (unlikely(existing_tb)) {
1647            tb_page_remove(p, tb);
1648            invalidate_page_bitmap(p);
1649            if (p2) {
1650                tb_page_remove(p2, tb);
1651                invalidate_page_bitmap(p2);
1652            }
1653            tb = existing_tb;
1654        }
1655    }
1656
1657    if (p2 && p2 != p) {
1658        page_unlock(p2);
1659    }
1660    page_unlock(p);
1661
1662#ifdef CONFIG_USER_ONLY
1663    if (DEBUG_TB_CHECK_GATE) {
1664        tb_page_check();
1665    }
1666#endif
1667    return tb;
1668}
1669
1670/* Called with mmap_lock held for user mode emulation.  */
1671TranslationBlock *tb_gen_code(CPUState *cpu,
1672                              target_ulong pc, target_ulong cs_base,
1673                              uint32_t flags, int cflags)
1674{
1675    CPUArchState *env = cpu->env_ptr;
1676    TranslationBlock *tb, *existing_tb;
1677    tb_page_addr_t phys_pc, phys_page2;
1678    target_ulong virt_page2;
1679    tcg_insn_unit *gen_code_buf;
1680    int gen_code_size, search_size;
1681#ifdef CONFIG_PROFILER
1682    TCGProfile *prof = &tcg_ctx->prof;
1683    int64_t ti;
1684#endif
1685    assert_memory_lock();
1686
1687    phys_pc = get_page_addr_code(env, pc);
1688
1689    if (phys_pc == -1) {
1690        /* Generate a temporary TB with 1 insn in it */
1691        cflags &= ~CF_COUNT_MASK;
1692        cflags |= CF_NOCACHE | 1;
1693    }
1694
1695 buffer_overflow:
1696    tb = tb_alloc(pc);
1697    if (unlikely(!tb)) {
1698        /* flush must be done */
1699        tb_flush(cpu);
1700        mmap_unlock();
1701        /* Make the execution loop process the flush as soon as possible.  */
1702        cpu->exception_index = EXCP_INTERRUPT;
1703        cpu_loop_exit(cpu);
1704    }
1705
1706    gen_code_buf = tcg_ctx->code_gen_ptr;
1707    tb->tc.ptr = gen_code_buf;
1708    tb->pc = pc;
1709    tb->cs_base = cs_base;
1710    tb->flags = flags;
1711    tb->cflags = cflags;
1712    tb->trace_vcpu_dstate = *cpu->trace_dstate;
1713    tcg_ctx->tb_cflags = cflags;
1714
1715#ifdef CONFIG_PROFILER
1716    /* includes aborted translations because of exceptions */
1717    atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
1718    ti = profile_getclock();
1719#endif
1720
1721    tcg_func_start(tcg_ctx);
1722
1723    tcg_ctx->cpu = ENV_GET_CPU(env);
1724    gen_intermediate_code(cpu, tb);
1725    tcg_ctx->cpu = NULL;
1726
1727    trace_translate_block(tb, tb->pc, tb->tc.ptr);
1728
1729    /* generate machine code */
1730    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1731    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1732    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
1733    if (TCG_TARGET_HAS_direct_jump) {
1734        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1735        tcg_ctx->tb_jmp_target_addr = NULL;
1736    } else {
1737        tcg_ctx->tb_jmp_insn_offset = NULL;
1738        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
1739    }
1740
1741#ifdef CONFIG_PROFILER
1742    atomic_set(&prof->tb_count, prof->tb_count + 1);
1743    atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
1744    ti = profile_getclock();
1745#endif
1746
1747    /* ??? Overflow could be handled better here.  In particular, we
1748       don't need to re-do gen_intermediate_code, nor should we re-do
1749       the tcg optimization currently hidden inside tcg_gen_code.  All
1750       that should be required is to flush the TBs, allocate a new TB,
1751       re-initialize it per above, and re-do the actual code generation.  */
1752    gen_code_size = tcg_gen_code(tcg_ctx, tb);
1753    if (unlikely(gen_code_size < 0)) {
1754        goto buffer_overflow;
1755    }
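        /*
         * encode_search() appends the "search data" right after the generated
         * code: a compressed map from host code offsets back to guest insn
         * state, which cpu_restore_state_from_tb() walks when it needs to
         * reconstruct the guest state for a given host PC.
         */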
1756    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1757    if (unlikely(search_size < 0)) {
1758        goto buffer_overflow;
1759    }
1760    tb->tc.size = gen_code_size;
1761
1762#ifdef CONFIG_PROFILER
1763    atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
1764    atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
1765    atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
1766    atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
1767#endif
1768
1769#ifdef DEBUG_DISAS
1770    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1771        qemu_log_in_addr_range(tb->pc)) {
1772        qemu_log_lock();
1773        qemu_log("OUT: [size=%d]\n", gen_code_size);
1774        if (tcg_ctx->data_gen_ptr) {
1775            size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
1776            size_t data_size = gen_code_size - code_size;
1777            size_t i;
1778
1779            log_disas(tb->tc.ptr, code_size);
1780
1781            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
1782                if (sizeof(tcg_target_ulong) == 8) {
1783                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
1784                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
1785                             *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
1786                } else {
1787                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
1788                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
1789                             *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
1790                }
1791            }
1792        } else {
1793            log_disas(tb->tc.ptr, gen_code_size);
1794        }
1795        qemu_log("\n");
1796        qemu_log_flush();
1797        qemu_log_unlock();
1798    }
1799#endif
1800
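        /*
         * Advance code_gen_ptr past both the generated code and the search
         * data, keeping CODE_GEN_ALIGN alignment so the next translation
         * starts on a properly aligned boundary.
         */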
1801    atomic_set(&tcg_ctx->code_gen_ptr, (void *)
1802        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1803                 CODE_GEN_ALIGN));
1804
1805    /* init jump list */
1806    qemu_spin_init(&tb->jmp_lock);
1807    tb->jmp_list_head = (uintptr_t)NULL;
1808    tb->jmp_list_next[0] = (uintptr_t)NULL;
1809    tb->jmp_list_next[1] = (uintptr_t)NULL;
1810    tb->jmp_dest[0] = (uintptr_t)NULL;
1811    tb->jmp_dest[1] = (uintptr_t)NULL;
1812
1813    /* initialize the original jump addresses that were recorded during tcg_gen_code() */
1814    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1815        tb_reset_jump(tb, 0);
1816    }
1817    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1818        tb_reset_jump(tb, 1);
1819    }
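        /*
         * tb_reset_jump() points each direct jump at the code that follows it
         * inside this TB, so an unchained TB falls through to its exit and
         * returns to the main loop; tb_add_jump() re-patches the jump once a
         * destination TB is available.
         */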
1820
1821    /* check next page if needed */
1822    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1823    phys_page2 = -1;
1824    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1825        phys_page2 = get_page_addr_code(env, virt_page2);
1826    }
1827    /*
1828     * No explicit memory barrier is required -- tb_link_page() makes the
1829     * TB visible in a consistent state.
1830     */
1831    existing_tb = tb_link_page(tb, phys_pc, phys_page2);
1832    /* if the TB already exists, discard what we just translated */
1833    if (unlikely(existing_tb != tb)) {
1834        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
1835
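            /*
             * tcg_tb_alloc() carved the TranslationBlock descriptor out of the
             * code buffer immediately before gen_code_buf, rounded up to the
             * icache line size; stepping back by that amount releases both the
             * descriptor and the code we just generated.
             */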
1836        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
1837        atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
1838        return existing_tb;
1839    }
1840    tcg_tb_insert(tb);
1841    return tb;
1842}
1843
1844/*
1845 * @p must be non-NULL.
1846 * user-mode: call with mmap_lock held.
1847 * !user-mode: call with all @pages locked.
1848 */
1849static void
1850tb_invalidate_phys_page_range__locked(struct page_collection *pages,
1851                                      PageDesc *p, tb_page_addr_t start,
1852                                      tb_page_addr_t end,
1853                                      int is_cpu_write_access)
1854{
1855    TranslationBlock *tb;
1856    tb_page_addr_t tb_start, tb_end;
1857    int n;
1858#ifdef TARGET_HAS_PRECISE_SMC
1859    CPUState *cpu = current_cpu;
1860    CPUArchState *env = NULL;
1861    int current_tb_not_found = is_cpu_write_access;
1862    TranslationBlock *current_tb = NULL;
1863    int current_tb_modified = 0;
1864    target_ulong current_pc = 0;
1865    target_ulong current_cs_base = 0;
1866    uint32_t current_flags = 0;
1867#endif /* TARGET_HAS_PRECISE_SMC */
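        /*
         * TARGET_HAS_PRECISE_SMC (at the time of writing only defined by the
         * x86 target) means a write into the page of the currently executing
         * TB must take effect immediately, so that TB may have to be aborted
         * mid-flight.
         */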
1868
1869    assert_page_locked(p);
1870
1871#if defined(TARGET_HAS_PRECISE_SMC)
1872    if (cpu != NULL) {
1873        env = cpu->env_ptr;
1874    }
1875#endif
1876
1877    /* we remove all the TBs in the range [start, end[ */
1878    /* XXX: see if in some cases it could be faster to invalidate all
1879       the code */
1880    PAGE_FOR_EACH_TB(p, tb, n) {
1881        assert_page_locked(p);
1882        /* NOTE: this is subtle as a TB may span two physical pages */
1883        if (n == 0) {
1884            /* NOTE: tb_end may be after the end of the page, but
1885               it is not a problem */
1886            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1887            tb_end = tb_start + tb->size;
1888        } else {
1889            tb_start = tb->page_addr[1];
1890            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1891        }
1892        if (!(tb_end <= start || tb_start >= end)) {
1893#ifdef TARGET_HAS_PRECISE_SMC
1894            if (current_tb_not_found) {
1895                current_tb_not_found = 0;
1896                current_tb = NULL;
1897                if (cpu->mem_io_pc) {
1898                    /* now we have a real cpu fault */
1899                    current_tb = tcg_tb_lookup(cpu->mem_io_pc);
1900                }
1901            }
1902            if (current_tb == tb &&
1903                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
1904                /* If we are modifying the current TB, we must stop
1905                its execution. We could be more precise by checking
1906                that the modification is after the current PC, but it
1907                would require a specialized function to partially
1908                restore the CPU state */
1909
1910                current_tb_modified = 1;
1911                cpu_restore_state_from_tb(cpu, current_tb,
1912                                          cpu->mem_io_pc, true);
1913                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1914                                     &current_flags);
1915            }
1916#endif /* TARGET_HAS_PRECISE_SMC */
1917            tb_phys_invalidate__locked(tb);
1918        }
1919    }
1920#if !defined(CONFIG_USER_ONLY)
1921    /* if no code remains in this page, there is no need to keep using slow writes */
1922    if (!p->first_tb) {
1923        invalidate_page_bitmap(p);
1924        tlb_unprotect_code(start);
1925    }
1926#endif
1927#ifdef TARGET_HAS_PRECISE_SMC
1928    if (current_tb_modified) {
1929        page_collection_unlock(pages);
1930        /* Force execution of one insn next time.  */
1931        cpu->cflags_next_tb = 1 | curr_cflags();
1932        mmap_unlock();
1933        cpu_loop_exit_noexc(cpu);
1934    }
1935#endif
1936}
1937
1938/*
1939 * Invalidate all TBs which intersect with the target physical address range
1940 * [start, end[. NOTE: start and end must refer to the *same* physical page.
1941 * 'is_cpu_write_access' should be true if called from a real cpu write
1942 * access: the virtual CPU will exit the current TB if code is modified inside
1943 * this TB.
1944 *
1945 * Called with mmap_lock held for user-mode emulation
1946 */
1947void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1948                                   int is_cpu_write_access)
1949{
1950    struct page_collection *pages;
1951    PageDesc *p;
1952
1953    assert_memory_lock();
1954
1955    p = page_find(start >> TARGET_PAGE_BITS);
1956    if (p == NULL) {
1957        return;
1958    }
1959    pages = page_collection_lock(start, end);
1960    tb_invalidate_phys_page_range__locked(pages, p, start, end,
1961                                          is_cpu_write_access);
1962    page_collection_unlock(pages);
1963}
1964
1965/*
1966 * Invalidate all TBs which intersect with the target physical address range
1967 * [start, end[. NOTE: start and end may refer to *different* physical pages.
1968 * 'is_cpu_write_access' should be true if called from a real cpu write
1969 * access: the virtual CPU will exit the current TB if code is modified inside
1970 * this TB.
1971 *
1972 * Called with mmap_lock held for user-mode emulation.
1973 */
1974#ifdef CONFIG_SOFTMMU
1975void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
1976#else
1977void tb_invalidate_phys_range(target_ulong start, target_ulong end)
1978#endif
1979{
1980    struct page_collection *pages;
1981    tb_page_addr_t next;
1982
1983    assert_memory_lock();
1984
1985    pages = page_collection_lock(start, end);
1986    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1987         start < end;
1988         start = next, next += TARGET_PAGE_SIZE) {
1989        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
1990        tb_page_addr_t bound = MIN(next, end);
1991
1992        if (pd == NULL) {
1993            continue;
1994        }
1995        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
1996    }
1997    page_collection_unlock(pages);
1998}
1999
2000#ifdef CONFIG_SOFTMMU
2001/* len must be <= 8 and start must be a multiple of len.
2002 * Called via softmmu_template.h when code areas are written to with
2003 * iothread mutex not held.
2004 *
2005 * Call with all @pages in the range [@start, @start + len[ locked.
2006 */
2007void tb_invalidate_phys_page_fast(struct page_collection *pages,
2008                                  tb_page_addr_t start, int len)
2009{
2010    PageDesc *p;
2011
2012    assert_memory_lock();
2013
2014    p = page_find(start >> TARGET_PAGE_BITS);
2015    if (!p) {
2016        return;
2017    }
2018
2019    assert_page_locked(p);
2020    if (!p->code_bitmap &&
2021        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
2022        build_page_bitmap(p);
2023    }
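        /*
         * code_bitmap has one bit per byte of the page, set for bytes covered
         * by translated code; if none of the bytes being written intersects
         * it, the (expensive) range invalidation can be skipped entirely.
         */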
2024    if (p->code_bitmap) {
2025        unsigned int nr;
2026        unsigned long b;
2027
2028        nr = start & ~TARGET_PAGE_MASK;
2029        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
2030        if (b & ((1 << len) - 1)) {
2031            goto do_invalidate;
2032        }
2033    } else {
2034    do_invalidate:
2035        tb_invalidate_phys_page_range__locked(pages, p, start, start + len, 1);
2036    }
2037}
2038#else
2039/* Called with mmap_lock held. If pc is not 0 then it indicates the
2040 * host PC of the faulting store instruction that caused this invalidate.
2041 * Returns true if the caller needs to abort execution of the current
2042 * TB (because it was modified by this store and the guest CPU has
2043 * precise-SMC semantics).
2044 */
2045static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
2046{
2047    TranslationBlock *tb;
2048    PageDesc *p;
2049    int n;
2050#ifdef TARGET_HAS_PRECISE_SMC
2051    TranslationBlock *current_tb = NULL;
2052    CPUState *cpu = current_cpu;
2053    CPUArchState *env = NULL;
2054    int current_tb_modified = 0;
2055    target_ulong current_pc = 0;
2056    target_ulong current_cs_base = 0;
2057    uint32_t current_flags = 0;
2058#endif
2059
2060    assert_memory_lock();
2061
2062    addr &= TARGET_PAGE_MASK;
2063    p = page_find(addr >> TARGET_PAGE_BITS);
2064    if (!p) {
2065        return false;
2066    }
2067
2068#ifdef TARGET_HAS_PRECISE_SMC
2069    if (p->first_tb && pc != 0) {
2070        current_tb = tcg_tb_lookup(pc);
2071    }
2072    if (cpu != NULL) {
2073        env = cpu->env_ptr;
2074    }
2075#endif
2076    assert_page_locked(p);
2077    PAGE_FOR_EACH_TB(p, tb, n) {
2078#ifdef TARGET_HAS_PRECISE_SMC
2079        if (current_tb == tb &&
2080            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
2081                /* If we are modifying the current TB, we must stop
2082                   its execution. We could be more precise by checking
2083                   that the modification is after the current PC, but it
2084                   would require a specialized function to partially
2085                   restore the CPU state */
2086
2087            current_tb_modified = 1;
2088            cpu_restore_state_from_tb(cpu, current_tb, pc, true);
2089            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2090                                 &current_flags);
2091        }
2092#endif /* TARGET_HAS_PRECISE_SMC */
2093        tb_phys_invalidate(tb, addr);
2094    }
2095    p->first_tb = (uintptr_t)NULL;
2096#ifdef TARGET_HAS_PRECISE_SMC
2097    if (current_tb_modified) {
2098        /* Force execution of one insn next time.  */
2099        cpu->cflags_next_tb = 1 | curr_cflags();
2100        return true;
2101    }
2102#endif
2103
2104    return false;
2105}
2106#endif
2107
2108/* user-mode: call with mmap_lock held */
2109void tb_check_watchpoint(CPUState *cpu)
2110{
2111    TranslationBlock *tb;
2112
2113    assert_memory_lock();
2114
2115    tb = tcg_tb_lookup(cpu->mem_io_pc);
2116    if (tb) {
2117        /* We can use retranslation to find the PC.  */
2118        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true);
2119        tb_phys_invalidate(tb, -1);
2120    } else {
2121        /* The exception probably happened in a helper.  The CPU state should
2122           have been saved before calling it. Fetch the PC from there.  */
2123        CPUArchState *env = cpu->env_ptr;
2124        target_ulong pc, cs_base;
2125        tb_page_addr_t addr;
2126        uint32_t flags;
2127
2128        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
2129        addr = get_page_addr_code(env, pc);
2130        if (addr != -1) {
2131            tb_invalidate_phys_range(addr, addr + 1);
2132        }
2133    }
2134}
2135
2136#ifndef CONFIG_USER_ONLY
2137/* In deterministic execution mode, an instruction that performs device I/O
2138 * must be the last one in its TB.
2139 *
2140 * Called by softmmu_template.h, with iothread mutex not held.
2141 */
2142void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
2143{
2144#if defined(TARGET_MIPS) || defined(TARGET_SH4)
2145    CPUArchState *env = cpu->env_ptr;
2146#endif
2147    TranslationBlock *tb;
2148    uint32_t n;
2149
2150    tb = tcg_tb_lookup(retaddr);
2151    if (!tb) {
2152        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
2153                  (void *)retaddr);
2154    }
2155    cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2156
2157    /* On MIPS and SH4, delay slot instructions can only be restarted if
2158       they were already the first instruction in the TB.  If this is not
2159       the first instruction in a TB then re-execute the preceding
2160       branch.  */
2161    n = 1;
2162#if defined(TARGET_MIPS)
2163    if ((env->hflags & MIPS_HFLAG_BMASK) != 0
2164        && env->active_tc.PC != tb->pc) {
2165        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
2166        cpu->icount_decr.u16.low++;
2167        env->hflags &= ~MIPS_HFLAG_BMASK;
2168        n = 2;
2169    }
2170#elif defined(TARGET_SH4)
2171    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
2172        && env->pc != tb->pc) {
2173        env->pc -= 2;
2174        cpu->icount_decr.u16.low++;
2175        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
2176        n = 2;
2177    }
2178#endif
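        /*
         * n is the number of guest insns the replacement TB must execute:
         * 1 for just the I/O instruction, or 2 when the branch preceding a
         * MIPS/SH4 delay slot has to be replayed as well.
         */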
2179
2180    /* Generate a new TB executing the I/O insn.  */
2181    cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
2182
2183    if (tb_cflags(tb) & CF_NOCACHE) {
2184        if (tb->orig_tb) {
2185            /* Invalidate original TB if this TB was generated in
2186             * cpu_exec_nocache() */
2187            tb_phys_invalidate(tb->orig_tb, -1);
2188        }
2189        tcg_tb_remove(tb);
2190    }
2191
2192    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2193     * the first in the TB) then we end up generating a whole new TB and
2194     * repeating the fault, which is horribly inefficient.
2195     * Better would be to execute just this insn uncached, or generate a
2196     * second new TB.
2197     */
2198    cpu_loop_exit_noexc(cpu);
2199}
2200
2201static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
2202{
2203    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
2204
2205    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
2206        atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
2207    }
2208}
2209
2210void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
2211{
2212    /* Discard jump cache entries for any TB that might overlap the
2213       flushed page.  */
2214    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
2215    tb_jmp_cache_clear_page(cpu, addr);
2216}
2217
2218static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
2219                                 struct qht_stats hst)
2220{
2221    uint32_t hgram_opts;
2222    size_t hgram_bins;
2223    char *hgram;
2224
2225    if (!hst.head_buckets) {
2226        return;
2227    }
2228    cpu_fprintf(f, "TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
2229                hst.used_head_buckets, hst.head_buckets,
2230                (double)hst.used_head_buckets / hst.head_buckets * 100);
2231
2232    hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
2233    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
2234    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2235        hgram_opts |= QDIST_PR_NODECIMAL;
2236    }
2237    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
2238    cpu_fprintf(f, "TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
2239                qdist_avg(&hst.occupancy) * 100, hgram);
2240    g_free(hgram);
2241
2242    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2243    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2244    if (hgram_bins > 10) {
2245        hgram_bins = 10;
2246    } else {
2247        hgram_bins = 0;
2248        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2249    }
2250    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
2251    cpu_fprintf(f, "TB hash avg chain   %0.3f buckets. Histogram: %s\n",
2252                qdist_avg(&hst.chain), hgram);
2253    g_free(hgram);
2254}
2255
2256struct tb_tree_stats {
2257    size_t nb_tbs;
2258    size_t host_size;
2259    size_t target_size;
2260    size_t max_target_size;
2261    size_t direct_jmp_count;
2262    size_t direct_jmp2_count;
2263    size_t cross_page;
2264};
2265
2266static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2267{
2268    const TranslationBlock *tb = value;
2269    struct tb_tree_stats *tst = data;
2270
2271    tst->nb_tbs++;
2272    tst->host_size += tb->tc.size;
2273    tst->target_size += tb->size;
2274    if (tb->size > tst->max_target_size) {
2275        tst->max_target_size = tb->size;
2276    }
2277    if (tb->page_addr[1] != -1) {
2278        tst->cross_page++;
2279    }
2280    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2281        tst->direct_jmp_count++;
2282        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2283            tst->direct_jmp2_count++;
2284        }
2285    }
2286    return false;
2287}
2288
2289void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
2290{
2291    struct tb_tree_stats tst = {};
2292    struct qht_stats hst;
2293    size_t nb_tbs, flush_full, flush_part, flush_elide;
2294
2295    tcg_tb_foreach(tb_tree_stats_iter, &tst);
2296    nb_tbs = tst.nb_tbs;
2297    /* XXX: avoid using doubles? */
2298    cpu_fprintf(f, "Translation buffer state:\n");
2299    /*
2300     * Report total code size including the padding and TB structs;
2301     * otherwise users might think "-tb-size" is not honoured.
2302     * For avg host size we use the precise numbers from tb_tree_stats though.
2303     */
2304    cpu_fprintf(f, "gen code size       %zu/%zu\n",
2305                tcg_code_size(), tcg_code_capacity());
2306    cpu_fprintf(f, "TB count            %zu\n", nb_tbs);
2307    cpu_fprintf(f, "TB avg target size  %zu max=%zu bytes\n",
2308                nb_tbs ? tst.target_size / nb_tbs : 0,
2309                tst.max_target_size);
2310    cpu_fprintf(f, "TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
2311                nb_tbs ? tst.host_size / nb_tbs : 0,
2312                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
2313    cpu_fprintf(f, "cross page TB count %zu (%zu%%)\n", tst.cross_page,
2314            nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
2315    cpu_fprintf(f, "direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
2316                tst.direct_jmp_count,
2317                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
2318                tst.direct_jmp2_count,
2319                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
2320
2321    qht_statistics_init(&tb_ctx.htable, &hst);
2322    print_qht_statistics(f, cpu_fprintf, hst);
2323    qht_statistics_destroy(&hst);
2324
2325    cpu_fprintf(f, "\nStatistics:\n");
2326    cpu_fprintf(f, "TB flush count      %u\n",
2327                atomic_read(&tb_ctx.tb_flush_count));
2328    cpu_fprintf(f, "TB invalidate count %zu\n", tcg_tb_phys_invalidate_count());
2329
2330    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
2331    cpu_fprintf(f, "TLB full flushes    %zu\n", flush_full);
2332    cpu_fprintf(f, "TLB partial flushes %zu\n", flush_part);
2333    cpu_fprintf(f, "TLB elided flushes  %zu\n", flush_elide);
2334    tcg_dump_info(f, cpu_fprintf);
2335}
2336
2337void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
2338{
2339    tcg_dump_op_count(f, cpu_fprintf);
2340}
2341
2342#else /* CONFIG_USER_ONLY */
2343
2344void cpu_interrupt(CPUState *cpu, int mask)
2345{
2346    g_assert(qemu_mutex_iothread_locked());
2347    cpu->interrupt_request |= mask;
2348    atomic_set(&cpu->icount_decr.u16.high, -1);
2349}
2350
2351/*
2352 * Walks guest process memory "regions" one by one
2353 * and calls callback function 'fn' for each region.
2354 */
2355struct walk_memory_regions_data {
2356    walk_memory_regions_fn fn;
2357    void *priv;
2358    target_ulong start;
2359    int prot;
2360};
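    /*
     * 'start' is the beginning of the region currently being accumulated
     * (-1u if none) and 'prot' its protection flags; consecutive pages with
     * identical flags are merged into a single callback invocation.
     */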
2361
2362static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2363                                   target_ulong end, int new_prot)
2364{
2365    if (data->start != -1u) {
2366        int rc = data->fn(data->priv, data->start, end, data->prot);
2367        if (rc != 0) {
2368            return rc;
2369        }
2370    }
2371
2372    data->start = (new_prot ? end : -1u);
2373    data->prot = new_prot;
2374
2375    return 0;
2376}
2377
2378static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2379                                 target_ulong base, int level, void **lp)
2380{
2381    target_ulong pa;
2382    int i, rc;
2383
2384    if (*lp == NULL) {
2385        return walk_memory_regions_end(data, base, 0);
2386    }
2387
2388    if (level == 0) {
2389        PageDesc *pd = *lp;
2390
2391        for (i = 0; i < V_L2_SIZE; ++i) {
2392            int prot = pd[i].flags;
2393
2394            pa = base | (i << TARGET_PAGE_BITS);
2395            if (prot != data->prot) {
2396                rc = walk_memory_regions_end(data, pa, prot);
2397                if (rc != 0) {
2398                    return rc;
2399                }
2400            }
2401        }
2402    } else {
2403        void **pp = *lp;
2404
2405        for (i = 0; i < V_L2_SIZE; ++i) {
2406            pa = base | ((target_ulong)i <<
2407                (TARGET_PAGE_BITS + V_L2_BITS * level));
2408            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2409            if (rc != 0) {
2410                return rc;
2411            }
2412        }
2413    }
2414
2415    return 0;
2416}
2417
2418int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2419{
2420    struct walk_memory_regions_data data;
2421    uintptr_t i, l1_sz = v_l1_size;
2422
2423    data.fn = fn;
2424    data.priv = priv;
2425    data.start = -1u;
2426    data.prot = 0;
2427
2428    for (i = 0; i < l1_sz; i++) {
2429        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2430        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2431        if (rc != 0) {
2432            return rc;
2433        }
2434    }
2435
2436    return walk_memory_regions_end(&data, 0, 0);
2437}
2438
2439static int dump_region(void *priv, target_ulong start,
2440    target_ulong end, unsigned long prot)
2441{
2442    FILE *f = (FILE *)priv;
2443
2444    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2445        " "TARGET_FMT_lx" %c%c%c\n",
2446        start, end, end - start,
2447        ((prot & PAGE_READ) ? 'r' : '-'),
2448        ((prot & PAGE_WRITE) ? 'w' : '-'),
2449        ((prot & PAGE_EXEC) ? 'x' : '-'));
2450
2451    return 0;
2452}
2453
2454/* dump memory mappings */
2455void page_dump(FILE *f)
2456{
2457    const int length = sizeof(target_ulong) * 2;
2458    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2459            length, "start", length, "end", length, "size", "prot");
2460    walk_memory_regions(f, dump_region);
2461}
2462
2463int page_get_flags(target_ulong address)
2464{
2465    PageDesc *p;
2466
2467    p = page_find(address >> TARGET_PAGE_BITS);
2468    if (!p) {
2469        return 0;
2470    }
2471    return p->flags;
2472}
2473
2474/* Modify the flags of a page and invalidate the code if necessary.
2475   The flag PAGE_WRITE_ORG is set automatically based on PAGE_WRITE.
2476   The mmap_lock should already be held.  */
2477void page_set_flags(target_ulong start, target_ulong end, int flags)
2478{
2479    target_ulong addr, len;
2480
2481    /* This function should never be called with addresses outside the
2482       guest address space.  If this assert fires, it probably indicates
2483       a missing call to h2g_valid.  */
2484#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2485    assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2486#endif
2487    assert(start < end);
2488    assert_memory_lock();
2489
2490    start = start & TARGET_PAGE_MASK;
2491    end = TARGET_PAGE_ALIGN(end);
2492
2493    if (flags & PAGE_WRITE) {
2494        flags |= PAGE_WRITE_ORG;
2495    }
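        /*
         * PAGE_WRITE_ORG records that the guest considers the page writable
         * even while QEMU temporarily clears PAGE_WRITE (and write-protects
         * the host page) to catch stores into translated code;
         * page_unprotect() relies on it to restore write access.
         */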
2496
2497    for (addr = start, len = end - start;
2498         len != 0;
2499         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2500        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2501
2502        /* If a previously write-protected page is being made writable,
2503           invalidate the translated code it contains.  */
2504        if (!(p->flags & PAGE_WRITE) &&
2505            (flags & PAGE_WRITE) &&
2506            p->first_tb) {
2507            tb_invalidate_phys_page(addr, 0);
2508        }
2509        p->flags = flags;
2510    }
2511}
2512
2513int page_check_range(target_ulong start, target_ulong len, int flags)
2514{
2515    PageDesc *p;
2516    target_ulong end;
2517    target_ulong addr;
2518
2519    /* This function should never be called with addresses outside the
2520       guest address space.  If this assert fires, it probably indicates
2521       a missing call to h2g_valid.  */
2522#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2523    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2524#endif
2525
2526    if (len == 0) {
2527        return 0;
2528    }
2529    if (start + len - 1 < start) {
2530        /* We've wrapped around.  */
2531        return -1;
2532    }
2533
2534    /* must do this before we lose bits in the next step */
2535    end = TARGET_PAGE_ALIGN(start + len);
2536    start = start & TARGET_PAGE_MASK;
2537
2538    for (addr = start, len = end - start;
2539         len != 0;
2540         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2541        p = page_find(addr >> TARGET_PAGE_BITS);
2542        if (!p) {
2543            return -1;
2544        }
2545        if (!(p->flags & PAGE_VALID)) {
2546            return -1;
2547        }
2548
2549        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2550            return -1;
2551        }
2552        if (flags & PAGE_WRITE) {
2553            if (!(p->flags & PAGE_WRITE_ORG)) {
2554                return -1;
2555            }
2556            /* unprotect the page if it was made read-only because it
2557               contains translated code */
2558            if (!(p->flags & PAGE_WRITE)) {
2559                if (!page_unprotect(addr, 0)) {
2560                    return -1;
2561                }
2562            }
2563        }
2564    }
2565    return 0;
2566}
2567
2568/* called from signal handler: invalidate the code and unprotect the
2569 * page. Return 0 if the fault was not handled, 1 if it was handled,
2570 * and 2 if it was handled but the caller must cause the TB to be
2571 * immediately exited. (We can only return 2 if the 'pc' argument is
2572 * non-zero.)
2573 */
2574int page_unprotect(target_ulong address, uintptr_t pc)
2575{
2576    unsigned int prot;
2577    bool current_tb_invalidated;
2578    PageDesc *p;
2579    target_ulong host_start, host_end, addr;
2580
2581    /* Technically this isn't safe inside a signal handler.  However we
2582       know this only ever happens in a synchronous SEGV handler, so in
2583       practice it seems to be ok.  */
2584    mmap_lock();
2585
2586    p = page_find(address >> TARGET_PAGE_BITS);
2587    if (!p) {
2588        mmap_unlock();
2589        return 0;
2590    }
2591
2592    /* if the page was originally writable, change its protection
2593       back to writable */
2594    if (p->flags & PAGE_WRITE_ORG) {
2595        current_tb_invalidated = false;
2596        if (p->flags & PAGE_WRITE) {
2597            /* If the page is actually marked WRITE then assume this is because
2598             * this thread raced with another one which got here first and
2599             * set the page to PAGE_WRITE and did the TB invalidate for us.
2600             */
2601#ifdef TARGET_HAS_PRECISE_SMC
2602            TranslationBlock *current_tb = tcg_tb_lookup(pc);
2603            if (current_tb) {
2604                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
2605            }
2606#endif
2607        } else {
2608            host_start = address & qemu_host_page_mask;
2609            host_end = host_start + qemu_host_page_size;
2610
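                /*
                 * The host page may be larger than the target page, so every
                 * target page sharing it must be made writable together; the
                 * loop accumulates the flags so that the final mprotect()
                 * applies the union of their protections.
                 */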
2611            prot = 0;
2612            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2613                p = page_find(addr >> TARGET_PAGE_BITS);
2614                p->flags |= PAGE_WRITE;
2615                prot |= p->flags;
2616
2617                /* and since the content will be modified, we must invalidate
2618                   the corresponding translated code. */
2619                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2620#ifdef CONFIG_USER_ONLY
2621                if (DEBUG_TB_CHECK_GATE) {
2622                    tb_invalidate_check(addr);
2623                }
2624#endif
2625            }
2626            mprotect((void *)g2h(host_start), qemu_host_page_size,
2627                     prot & PAGE_BITS);
2628        }
2629        mmap_unlock();
2630        /* If the current TB was invalidated, return to the main loop */
2631        return current_tb_invalidated ? 2 : 1;
2632    }
2633    mmap_unlock();
2634    return 0;
2635}
2636#endif /* CONFIG_USER_ONLY */
2637
2638/* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
2639void tcg_flush_softmmu_tlb(CPUState *cs)
2640{
2641#ifdef CONFIG_SOFTMMU
2642    tlb_flush(cs);
2643#endif
2644}
2645