qemu/translate-all.c
   1/*
   2 *  Host code generation
   3 *
   4 *  Copyright (c) 2003 Fabrice Bellard
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19#ifdef _WIN32
  20#include <windows.h>
  21#else
  22#include <sys/mman.h>
  23#endif
  24#include "qemu/osdep.h"
  25
  26
  27#include "qemu-common.h"
  28#define NO_CPU_IO_DEFS
  29#include "cpu.h"
  30#include "trace.h"
  31#include "disas/disas.h"
  32#include "tcg.h"
  33#if defined(CONFIG_USER_ONLY)
  34#include "qemu.h"
  35#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
  36#include <sys/param.h>
  37#if __FreeBSD_version >= 700104
  38#define HAVE_KINFO_GETVMMAP
  39#define sigqueue sigqueue_freebsd  /* avoid redefinition */
  40#include <sys/proc.h>
  41#include <machine/profile.h>
  42#define _KERNEL
  43#include <sys/user.h>
  44#undef _KERNEL
  45#undef sigqueue
  46#include <libutil.h>
  47#endif
  48#endif
  49#else
  50#include "exec/address-spaces.h"
  51#endif
  52
  53#include "exec/cputlb.h"
  54#include "exec/tb-hash.h"
  55#include "translate-all.h"
  56#include "qemu/bitmap.h"
  57#include "qemu/timer.h"
  58#include "exec/log.h"
  59
  60//#define DEBUG_TB_INVALIDATE
  61//#define DEBUG_FLUSH
  62/* make various TB consistency checks */
  63//#define DEBUG_TB_CHECK
  64
  65#if !defined(CONFIG_USER_ONLY)
  66/* TB consistency checks only implemented for usermode emulation.  */
  67#undef DEBUG_TB_CHECK
  68#endif
  69
  70#define SMC_BITMAP_USE_THRESHOLD 10
  71
  72typedef struct PageDesc {
  73    /* list of TBs intersecting this ram page */
  74    TranslationBlock *first_tb;
   75    /* in order to optimize self-modifying code, we count the number
   76       of code writes to a given page; past a threshold a bitmap is used */
  77    unsigned int code_write_count;
  78    unsigned long *code_bitmap;
  79#if defined(CONFIG_USER_ONLY)
  80    unsigned long flags;
  81#endif
  82} PageDesc;
  83
  84/* In system mode we want L1_MAP to be based on ram offsets,
  85   while in user mode we want it to be based on virtual addresses.  */
  86#if !defined(CONFIG_USER_ONLY)
  87#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
  88# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
  89#else
  90# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
  91#endif
  92#else
  93# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
  94#endif
  95
  96/* Size of the L2 (and L3, etc) page tables.  */
  97#define V_L2_BITS 10
  98#define V_L2_SIZE (1 << V_L2_BITS)
  99
 100/* The bits remaining after N lower levels of page tables.  */
 101#define V_L1_BITS_REM \
 102    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)
 103
 104#if V_L1_BITS_REM < 4
 105#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
 106#else
 107#define V_L1_BITS  V_L1_BITS_REM
 108#endif
 109
 110#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
 111
 112#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
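/* Illustrative arithmetic, assuming a user-mode target with a 47-bit
   guest virtual address space and 12-bit (4 KiB) target pages; the real
   values depend on the configured target:
       bits to map   = 47 - 12 = 35
       V_L1_BITS_REM = 35 % 10 = 5   (>= 4, so V_L1_BITS = 5)
       V_L1_SIZE     = 1 << 5  = 32 entries in l1_map[]
       V_L1_SHIFT    = 35 - 5  = 30
   page_find_alloc() then walks V_L1_SHIFT / V_L2_BITS = 3 more levels
   (two intermediate pointer tables plus the leaf PageDesc array), each
   indexed by 10 bits, covering 5 + 10 + 10 + 10 = 35 bits in total.  */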
 113
 114uintptr_t qemu_host_page_size;
 115intptr_t qemu_host_page_mask;
 116
 117/* The bottom level has pointers to PageDesc */
 118static void *l1_map[V_L1_SIZE];
 119
 120/* code generation context */
 121TCGContext tcg_ctx;
 122
 123/* translation block context */
 124#ifdef CONFIG_USER_ONLY
 125__thread int have_tb_lock;
 126#endif
 127
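/* Note that tb_lock/tb_unlock below only do real work for user-mode
   emulation, where several guest threads may translate and invalidate
   code concurrently; in softmmu builds their bodies compile away, as
   translation is serialized by other means in this version.  */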
 128void tb_lock(void)
 129{
 130#ifdef CONFIG_USER_ONLY
 131    assert(!have_tb_lock);
 132    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
 133    have_tb_lock++;
 134#endif
 135}
 136
 137void tb_unlock(void)
 138{
 139#ifdef CONFIG_USER_ONLY
 140    assert(have_tb_lock);
 141    have_tb_lock--;
 142    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
 143#endif
 144}
 145
 146void tb_lock_reset(void)
 147{
 148#ifdef CONFIG_USER_ONLY
 149    if (have_tb_lock) {
 150        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
 151        have_tb_lock = 0;
 152    }
 153#endif
 154}
 155
 156static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
 157                         tb_page_addr_t phys_page2);
 158static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
 159
 160void cpu_gen_init(void)
 161{
  162    tcg_context_init(&tcg_ctx);
 163}
 164
 165/* Encode VAL as a signed leb128 sequence at P.
 166   Return P incremented past the encoded value.  */
 167static uint8_t *encode_sleb128(uint8_t *p, target_long val)
 168{
 169    int more, byte;
 170
 171    do {
 172        byte = val & 0x7f;
 173        val >>= 7;
 174        more = !((val == 0 && (byte & 0x40) == 0)
 175                 || (val == -1 && (byte & 0x40) != 0));
 176        if (more) {
 177            byte |= 0x80;
 178        }
 179        *p++ = byte;
 180    } while (more);
 181
 182    return p;
 183}
 184
 185/* Decode a signed leb128 sequence at *PP; increment *PP past the
 186   decoded value.  Return the decoded value.  */
 187static target_long decode_sleb128(uint8_t **pp)
 188{
 189    uint8_t *p = *pp;
 190    target_long val = 0;
 191    int byte, shift = 0;
 192
 193    do {
 194        byte = *p++;
 195        val |= (target_ulong)(byte & 0x7f) << shift;
 196        shift += 7;
 197    } while (byte & 0x80);
 198    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
 199        val |= -(target_ulong)1 << shift;
 200    }
 201
 202    *pp = p;
 203    return val;
 204}
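/* Worked examples for the encoding above, using hypothetical delta values:
      3  ->  0x03        (bit 0x40 clear, so one byte suffices)
    100  ->  0xE4 0x00   (0x64 has bit 0x40 set, so a zero continuation
                          byte is required to mark the value as positive)
     -3  ->  0x7D        (after the shift val is -1 and bit 0x40 is set,
                          so one byte suffices)  */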
 205
 206/* Encode the data collected about the instructions while compiling TB.
 207   Place the data at BLOCK, and return the number of bytes consumed.
 208
  209   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
 210   which come from the target's insn_start data, followed by a uintptr_t
 211   which comes from the host pc of the end of the code implementing the insn.
 212
 213   Each line of the table is encoded as sleb128 deltas from the previous
 214   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
 215   That is, the first column is seeded with the guest pc, the last column
 216   with the host pc, and the middle columns with zeros.  */
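/* Hypothetical illustration: a TB at guest pc 0x1000 holding two 4-byte
   insns whose generated host code ends at offsets 40 and 72, with a single
   insn_start word per insn, yields the logical table
       { 0x1000, 40 }
       { 0x1004, 72 }
   which is stored as the sleb128-encoded deltas 0, 40, 4, 32.  */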
 217
 218static int encode_search(TranslationBlock *tb, uint8_t *block)
 219{
 220    uint8_t *highwater = tcg_ctx.code_gen_highwater;
 221    uint8_t *p = block;
 222    int i, j, n;
 223
 224    tb->tc_search = block;
 225
 226    for (i = 0, n = tb->icount; i < n; ++i) {
 227        target_ulong prev;
 228
 229        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
 230            if (i == 0) {
 231                prev = (j == 0 ? tb->pc : 0);
 232            } else {
 233                prev = tcg_ctx.gen_insn_data[i - 1][j];
 234            }
 235            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
 236        }
 237        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
 238        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);
 239
 240        /* Test for (pending) buffer overflow.  The assumption is that any
 241           one row beginning below the high water mark cannot overrun
 242           the buffer completely.  Thus we can test for overflow after
 243           encoding a row without having to check during encoding.  */
 244        if (unlikely(p > highwater)) {
 245            return -1;
 246        }
 247    }
 248
 249    return p - block;
 250}
 251
 252/* The cpu state corresponding to 'searched_pc' is restored.  */
 253static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
 254                                     uintptr_t searched_pc)
 255{
 256    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
 257    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
 258    CPUArchState *env = cpu->env_ptr;
 259    uint8_t *p = tb->tc_search;
 260    int i, j, num_insns = tb->icount;
 261#ifdef CONFIG_PROFILER
 262    int64_t ti = profile_getclock();
 263#endif
 264
 265    if (searched_pc < host_pc) {
 266        return -1;
 267    }
 268
 269    /* Reconstruct the stored insn data while looking for the point at
 270       which the end of the insn exceeds the searched_pc.  */
 271    for (i = 0; i < num_insns; ++i) {
 272        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
 273            data[j] += decode_sleb128(&p);
 274        }
 275        host_pc += decode_sleb128(&p);
 276        if (host_pc > searched_pc) {
 277            goto found;
 278        }
 279    }
 280    return -1;
 281
 282 found:
 283    if (tb->cflags & CF_USE_ICOUNT) {
 284        assert(use_icount);
 285        /* Reset the cycle counter to the start of the block.  */
 286        cpu->icount_decr.u16.low += num_insns;
 287        /* Clear the IO flag.  */
 288        cpu->can_do_io = 0;
 289    }
 290    cpu->icount_decr.u16.low -= i;
 291    restore_state_to_opc(env, tb, data);
 292
 293#ifdef CONFIG_PROFILER
 294    tcg_ctx.restore_time += profile_getclock() - ti;
 295    tcg_ctx.restore_count++;
 296#endif
 297    return 0;
 298}
 299
 300bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
 301{
 302    TranslationBlock *tb;
 303
 304    tb = tb_find_pc(retaddr);
 305    if (tb) {
 306        cpu_restore_state_from_tb(cpu, tb, retaddr);
 307        if (tb->cflags & CF_NOCACHE) {
 308            /* one-shot translation, invalidate it immediately */
 309            cpu->current_tb = NULL;
 310            tb_phys_invalidate(tb, -1);
 311            tb_free(tb);
 312        }
 313        return true;
 314    }
 315    return false;
 316}
 317
 318void page_size_init(void)
 319{
 320    /* NOTE: we can always suppose that qemu_host_page_size >=
 321       TARGET_PAGE_SIZE */
 322    qemu_real_host_page_size = getpagesize();
 323    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
 324    if (qemu_host_page_size == 0) {
 325        qemu_host_page_size = qemu_real_host_page_size;
 326    }
 327    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
 328        qemu_host_page_size = TARGET_PAGE_SIZE;
 329    }
 330    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
 331}
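/* For example, with a 4 KiB host page the mask computed above is
   -(intptr_t)0x1000, i.e. ...fffff000, so addresses can be aligned down
   to a page boundary with a single bitwise AND.  */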
 332
 333static void page_init(void)
 334{
 335    page_size_init();
 336#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
 337    {
 338#ifdef HAVE_KINFO_GETVMMAP
 339        struct kinfo_vmentry *freep;
 340        int i, cnt;
 341
 342        freep = kinfo_getvmmap(getpid(), &cnt);
 343        if (freep) {
 344            mmap_lock();
 345            for (i = 0; i < cnt; i++) {
 346                unsigned long startaddr, endaddr;
 347
 348                startaddr = freep[i].kve_start;
 349                endaddr = freep[i].kve_end;
 350                if (h2g_valid(startaddr)) {
 351                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
 352
 353                    if (h2g_valid(endaddr)) {
 354                        endaddr = h2g(endaddr);
 355                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
 356                    } else {
 357#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
 358                        endaddr = ~0ul;
 359                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
 360#endif
 361                    }
 362                }
 363            }
 364            free(freep);
 365            mmap_unlock();
 366        }
 367#else
 368        FILE *f;
 369
 370        last_brk = (unsigned long)sbrk(0);
 371
 372        f = fopen("/compat/linux/proc/self/maps", "r");
 373        if (f) {
 374            mmap_lock();
 375
 376            do {
 377                unsigned long startaddr, endaddr;
 378                int n;
 379
 380                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
 381
 382                if (n == 2 && h2g_valid(startaddr)) {
 383                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
 384
 385                    if (h2g_valid(endaddr)) {
 386                        endaddr = h2g(endaddr);
 387                    } else {
 388                        endaddr = ~0ul;
 389                    }
 390                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
 391                }
 392            } while (!feof(f));
 393
 394            fclose(f);
 395            mmap_unlock();
 396        }
 397#endif
 398    }
 399#endif
 400}
 401
  402/* If alloc=1, a new PageDesc is allocated when none exists for the index.
 403 * Called with mmap_lock held for user-mode emulation.
 404 */
 405static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
 406{
 407    PageDesc *pd;
 408    void **lp;
 409    int i;
 410
 411    /* Level 1.  Always allocated.  */
 412    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
 413
 414    /* Level 2..N-1.  */
 415    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
 416        void **p = atomic_rcu_read(lp);
 417
 418        if (p == NULL) {
 419            if (!alloc) {
 420                return NULL;
 421            }
 422            p = g_new0(void *, V_L2_SIZE);
 423            atomic_rcu_set(lp, p);
 424        }
 425
 426        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
 427    }
 428
 429    pd = atomic_rcu_read(lp);
 430    if (pd == NULL) {
 431        if (!alloc) {
 432            return NULL;
 433        }
 434        pd = g_new0(PageDesc, V_L2_SIZE);
 435        atomic_rcu_set(lp, pd);
 436    }
 437
 438    return pd + (index & (V_L2_SIZE - 1));
 439}
 440
 441static inline PageDesc *page_find(tb_page_addr_t index)
 442{
 443    return page_find_alloc(index, 0);
 444}
 445
 446#if defined(CONFIG_USER_ONLY)
  447/* Currently it is not recommended to allocate big chunks of data in
  448   user mode; this may change once a dedicated libc is used.  */
  449/* ??? 64-bit hosts ought to have no problem mmapping data outside the
  450   region in which the guest needs to run.  Revisit this.  */
 451#define USE_STATIC_CODE_GEN_BUFFER
 452#endif
 453
 454/* Minimum size of the code gen buffer.  This number is randomly chosen,
 455   but not so small that we can't have a fair number of TB's live.  */
 456#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)
 457
 458/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
 459   indicated, this is constrained by the range of direct branches on the
 460   host cpu, as used by the TCG implementation of goto_tb.  */
 461#if defined(__x86_64__)
 462# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
 463#elif defined(__sparc__)
 464# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
 465#elif defined(__powerpc64__)
 466# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
 467#elif defined(__aarch64__)
 468# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
 469#elif defined(__arm__)
 470# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
 471#elif defined(__s390x__)
 472  /* We have a +- 4GB range on the branches; leave some slop.  */
 473# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
 474#elif defined(__mips__)
 475  /* We have a 256MB branch region, but leave room to make sure the
 476     main executable is also within that region.  */
 477# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
 478#else
 479# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
 480#endif
 481
 482#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
 483
 484#define DEFAULT_CODE_GEN_BUFFER_SIZE \
 485  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
 486   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
 487
 488static inline size_t size_code_gen_buffer(size_t tb_size)
 489{
 490    /* Size the buffer.  */
 491    if (tb_size == 0) {
 492#ifdef USE_STATIC_CODE_GEN_BUFFER
 493        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
 494#else
 495        /* ??? Needs adjustments.  */
 496        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
 497           static buffer, we could size this on RESERVED_VA, on the text
 498           segment size of the executable, or continue to use the default.  */
 499        tb_size = (unsigned long)(ram_size / 4);
 500#endif
 501    }
 502    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
 503        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
 504    }
 505    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
 506        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
 507    }
 508    tcg_ctx.code_gen_buffer_size = tb_size;
 509    return tb_size;
 510}
 511
 512#ifdef __mips__
 513/* In order to use J and JAL within the code_gen_buffer, we require
 514   that the buffer not cross a 256MB boundary.  */
 515static inline bool cross_256mb(void *addr, size_t size)
 516{
 517    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000;
 518}
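/* Hypothetical example: a buffer at 0x0fff0000 of size 0x20000 ends at
   0x10010000; XOR-ing the two ends gives 0x1ffe0000, which has bits set
   within the 0xf0000000 mask, so the buffer straddles a 256MB-aligned
   boundary and cross_256mb() reports true.  */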
 519
 520/* We weren't able to allocate a buffer without crossing that boundary,
 521   so make do with the larger portion of the buffer that doesn't cross.
 522   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
 523static inline void *split_cross_256mb(void *buf1, size_t size1)
 524{
 525    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
 526    size_t size2 = buf1 + size1 - buf2;
 527
 528    size1 = buf2 - buf1;
 529    if (size1 < size2) {
 530        size1 = size2;
 531        buf1 = buf2;
 532    }
 533
 534    tcg_ctx.code_gen_buffer_size = size1;
 535    return buf1;
 536}
 537#endif
 538
 539#ifdef USE_STATIC_CODE_GEN_BUFFER
 540static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
 541    __attribute__((aligned(CODE_GEN_ALIGN)));
 542
 543# ifdef _WIN32
 544static inline void do_protect(void *addr, long size, int prot)
 545{
 546    DWORD old_protect;
 547    VirtualProtect(addr, size, prot, &old_protect);
 548}
 549
 550static inline void map_exec(void *addr, long size)
 551{
 552    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
 553}
 554
 555static inline void map_none(void *addr, long size)
 556{
 557    do_protect(addr, size, PAGE_NOACCESS);
 558}
 559# else
 560static inline void do_protect(void *addr, long size, int prot)
 561{
 562    uintptr_t start, end;
 563
 564    start = (uintptr_t)addr;
 565    start &= qemu_real_host_page_mask;
 566
 567    end = (uintptr_t)addr + size;
 568    end = ROUND_UP(end, qemu_real_host_page_size);
 569
 570    mprotect((void *)start, end - start, prot);
 571}
 572
 573static inline void map_exec(void *addr, long size)
 574{
 575    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
 576}
 577
 578static inline void map_none(void *addr, long size)
 579{
 580    do_protect(addr, size, PROT_NONE);
 581}
 582# endif /* WIN32 */
 583
 584static inline void *alloc_code_gen_buffer(void)
 585{
 586    void *buf = static_code_gen_buffer;
 587    size_t full_size, size;
 588
 589    /* The size of the buffer, rounded down to end on a page boundary.  */
 590    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
 591                 & qemu_real_host_page_mask) - (uintptr_t)buf;
 592
 593    /* Reserve a guard page.  */
 594    size = full_size - qemu_real_host_page_size;
 595
 596    /* Honor a command-line option limiting the size of the buffer.  */
 597    if (size > tcg_ctx.code_gen_buffer_size) {
 598        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
 599                & qemu_real_host_page_mask) - (uintptr_t)buf;
 600    }
 601    tcg_ctx.code_gen_buffer_size = size;
 602
 603#ifdef __mips__
 604    if (cross_256mb(buf, size)) {
 605        buf = split_cross_256mb(buf, size);
 606        size = tcg_ctx.code_gen_buffer_size;
 607    }
 608#endif
 609
 610    map_exec(buf, size);
 611    map_none(buf + size, qemu_real_host_page_size);
 612    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
 613
 614    return buf;
 615}
 616#elif defined(_WIN32)
 617static inline void *alloc_code_gen_buffer(void)
 618{
 619    size_t size = tcg_ctx.code_gen_buffer_size;
 620    void *buf1, *buf2;
 621
 622    /* Perform the allocation in two steps, so that the guard page
 623       is reserved but uncommitted.  */
 624    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
 625                        MEM_RESERVE, PAGE_NOACCESS);
 626    if (buf1 != NULL) {
 627        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
 628        assert(buf1 == buf2);
 629    }
 630
 631    return buf1;
 632}
 633#else
 634static inline void *alloc_code_gen_buffer(void)
 635{
 636    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
 637    uintptr_t start = 0;
 638    size_t size = tcg_ctx.code_gen_buffer_size;
 639    void *buf;
 640
 641    /* Constrain the position of the buffer based on the host cpu.
 642       Note that these addresses are chosen in concert with the
 643       addresses assigned in the relevant linker script file.  */
 644# if defined(__PIE__) || defined(__PIC__)
 645    /* Don't bother setting a preferred location if we're building
 646       a position-independent executable.  We're more likely to get
 647       an address near the main executable if we let the kernel
 648       choose the address.  */
 649# elif defined(__x86_64__) && defined(MAP_32BIT)
 650    /* Force the memory down into low memory with the executable.
 651       Leave the choice of exact location with the kernel.  */
 652    flags |= MAP_32BIT;
 653    /* Cannot expect to map more than 800MB in low memory.  */
 654    if (size > 800u * 1024 * 1024) {
 655        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
 656    }
 657# elif defined(__sparc__)
 658    start = 0x40000000ul;
 659# elif defined(__s390x__)
 660    start = 0x90000000ul;
 661# elif defined(__mips__)
 662#  if _MIPS_SIM == _ABI64
 663    start = 0x128000000ul;
 664#  else
 665    start = 0x08000000ul;
 666#  endif
 667# endif
 668
 669    buf = mmap((void *)start, size + qemu_real_host_page_size,
 670               PROT_NONE, flags, -1, 0);
 671    if (buf == MAP_FAILED) {
 672        return NULL;
 673    }
 674
 675#ifdef __mips__
 676    if (cross_256mb(buf, size)) {
 677        /* Try again, with the original still mapped, to avoid re-acquiring
 678           that 256mb crossing.  This time don't specify an address.  */
 679        size_t size2;
 680        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
 681                          PROT_NONE, flags, -1, 0);
 682        switch (buf2 != MAP_FAILED) {
 683        case 1:
 684            if (!cross_256mb(buf2, size)) {
 685                /* Success!  Use the new buffer.  */
 686                munmap(buf, size);
 687                break;
 688            }
 689            /* Failure.  Work with what we had.  */
 690            munmap(buf2, size);
 691            /* fallthru */
 692        default:
 693            /* Split the original buffer.  Free the smaller half.  */
 694            buf2 = split_cross_256mb(buf, size);
 695            size2 = tcg_ctx.code_gen_buffer_size;
 696            if (buf == buf2) {
 697                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
 698            } else {
 699                munmap(buf, size - size2);
 700            }
 701            size = size2;
 702            break;
 703        }
 704        buf = buf2;
 705    }
 706#endif
 707
 708    /* Make the final buffer accessible.  The guard page at the end
 709       will remain inaccessible with PROT_NONE.  */
 710    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);
 711
 712    /* Request large pages for the buffer.  */
 713    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
 714
 715    return buf;
 716}
 717#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
 718
 719static inline void code_gen_alloc(size_t tb_size)
 720{
 721    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
 722    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
 723    if (tcg_ctx.code_gen_buffer == NULL) {
 724        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
 725        exit(1);
 726    }
 727
 728    /* Estimate a good size for the number of TBs we can support.  We
 729       still haven't deducted the prologue from the buffer size here,
 730       but that's minimal and won't affect the estimate much.  */
 731    tcg_ctx.code_gen_max_blocks
 732        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
 733    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);
 734
 735    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
 736}
 737
 738/* Must be called before using the QEMU cpus. 'tb_size' is the size
 739   (in bytes) allocated to the translation buffer. Zero means default
 740   size. */
 741void tcg_exec_init(unsigned long tb_size)
 742{
 743    cpu_gen_init();
 744    page_init();
 745    code_gen_alloc(tb_size);
 746#if defined(CONFIG_SOFTMMU)
 747    /* There's no guest base to take into account, so go ahead and
 748       initialize the prologue now.  */
 749    tcg_prologue_init(&tcg_ctx);
 750#endif
 751}
 752
 753bool tcg_enabled(void)
 754{
 755    return tcg_ctx.code_gen_buffer != NULL;
 756}
 757
 758/* Allocate a new translation block. Flush the translation buffer if
 759   too many translation blocks or too much generated code. */
 760static TranslationBlock *tb_alloc(target_ulong pc)
 761{
 762    TranslationBlock *tb;
 763
 764    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
 765        return NULL;
 766    }
 767    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
 768    tb->pc = pc;
 769    tb->cflags = 0;
 770    return tb;
 771}
 772
 773void tb_free(TranslationBlock *tb)
 774{
  775    /* In practice this is mostly used for single-use temporary TBs.
 776       Ignore the hard cases and just back up if this TB happens to
 777       be the last one generated.  */
 778    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
 779            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
 780        tcg_ctx.code_gen_ptr = tb->tc_ptr;
 781        tcg_ctx.tb_ctx.nb_tbs--;
 782    }
 783}
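/* tb_alloc() and tb_free() together behave like a bump allocator: TBs are
   handed out sequentially from tbs[] and host code from code_gen_buffer,
   so only the most recently generated TB can actually be given back;
   anything else is only reclaimed wholesale by the next tb_flush().  */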
 784
 785static inline void invalidate_page_bitmap(PageDesc *p)
 786{
 787    g_free(p->code_bitmap);
 788    p->code_bitmap = NULL;
 789    p->code_write_count = 0;
 790}
 791
 792/* Set to NULL all the 'first_tb' fields in all PageDescs. */
 793static void page_flush_tb_1(int level, void **lp)
 794{
 795    int i;
 796
 797    if (*lp == NULL) {
 798        return;
 799    }
 800    if (level == 0) {
 801        PageDesc *pd = *lp;
 802
 803        for (i = 0; i < V_L2_SIZE; ++i) {
 804            pd[i].first_tb = NULL;
 805            invalidate_page_bitmap(pd + i);
 806        }
 807    } else {
 808        void **pp = *lp;
 809
 810        for (i = 0; i < V_L2_SIZE; ++i) {
 811            page_flush_tb_1(level - 1, pp + i);
 812        }
 813    }
 814}
 815
 816static void page_flush_tb(void)
 817{
 818    int i;
 819
 820    for (i = 0; i < V_L1_SIZE; i++) {
 821        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
 822    }
 823}
 824
 825/* flush all the translation blocks */
 826/* XXX: tb_flush is currently not thread safe */
 827void tb_flush(CPUState *cpu)
 828{
 829#if defined(DEBUG_FLUSH)
 830    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
 831           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
 832           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
 833           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
 834           tcg_ctx.tb_ctx.nb_tbs : 0);
 835#endif
 836    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
 837        > tcg_ctx.code_gen_buffer_size) {
 838        cpu_abort(cpu, "Internal error: code buffer overflow\n");
 839    }
 840    tcg_ctx.tb_ctx.nb_tbs = 0;
 841
 842    CPU_FOREACH(cpu) {
 843        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
 844    }
 845
 846    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
 847    page_flush_tb();
 848
 849    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
 850    /* XXX: flush processor icache at this point if cache flush is
 851       expensive */
 852    tcg_ctx.tb_ctx.tb_flush_count++;
 853}
 854
 855#ifdef DEBUG_TB_CHECK
 856
 857static void tb_invalidate_check(target_ulong address)
 858{
 859    TranslationBlock *tb;
 860    int i;
 861
 862    address &= TARGET_PAGE_MASK;
 863    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
 864        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
 865             tb = tb->phys_hash_next) {
 866            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
 867                  address >= tb->pc + tb->size)) {
 868                printf("ERROR invalidate: address=" TARGET_FMT_lx
 869                       " PC=%08lx size=%04x\n",
 870                       address, (long)tb->pc, tb->size);
 871            }
 872        }
 873    }
 874}
 875
 876/* verify that all the pages have correct rights for code */
 877static void tb_page_check(void)
 878{
 879    TranslationBlock *tb;
 880    int i, flags1, flags2;
 881
 882    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
 883        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
 884                tb = tb->phys_hash_next) {
 885            flags1 = page_get_flags(tb->pc);
 886            flags2 = page_get_flags(tb->pc + tb->size - 1);
 887            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
 888                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
 889                       (long)tb->pc, tb->size, flags1, flags2);
 890            }
 891        }
 892    }
 893}
 894
 895#endif
 896
 897static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
 898{
 899    TranslationBlock *tb1;
 900
 901    for (;;) {
 902        tb1 = *ptb;
 903        if (tb1 == tb) {
 904            *ptb = tb1->phys_hash_next;
 905            break;
 906        }
 907        ptb = &tb1->phys_hash_next;
 908    }
 909}
 910
 911static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
 912{
 913    TranslationBlock *tb1;
 914    unsigned int n1;
 915
 916    for (;;) {
 917        tb1 = *ptb;
 918        n1 = (uintptr_t)tb1 & 3;
 919        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
 920        if (tb1 == tb) {
 921            *ptb = tb1->page_next[n1];
 922            break;
 923        }
 924        ptb = &tb1->page_next[n1];
 925    }
 926}
 927
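/* The jmp_first/jmp_next fields form a tagged circular list: for a given
   TB, jmp_first heads the list of TBs whose direct jump 0 or 1 targets it,
   and the low two bits of each pointer say how the list continues -- 0 or 1
   meaning that TB's jmp_next[0]/jmp_next[1], and 2 meaning the list has
   wrapped back to the owning TB's jmp_first.  An empty list is encoded as
   the TB pointing at itself with tag 2 (see tb_link_page below).  */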
 928static inline void tb_jmp_remove(TranslationBlock *tb, int n)
 929{
 930    TranslationBlock *tb1, **ptb;
 931    unsigned int n1;
 932
 933    ptb = &tb->jmp_next[n];
 934    tb1 = *ptb;
 935    if (tb1) {
 936        /* find tb(n) in circular list */
 937        for (;;) {
 938            tb1 = *ptb;
 939            n1 = (uintptr_t)tb1 & 3;
 940            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
 941            if (n1 == n && tb1 == tb) {
 942                break;
 943            }
 944            if (n1 == 2) {
 945                ptb = &tb1->jmp_first;
 946            } else {
 947                ptb = &tb1->jmp_next[n1];
 948            }
 949        }
 950        /* now we can suppress tb(n) from the list */
 951        *ptb = tb->jmp_next[n];
 952
 953        tb->jmp_next[n] = NULL;
 954    }
 955}
 956
 957/* reset the jump entry 'n' of a TB so that it is not chained to
 958   another TB */
 959static inline void tb_reset_jump(TranslationBlock *tb, int n)
 960{
 961    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
 962}
 963
 964/* invalidate one TB */
 965void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
 966{
 967    CPUState *cpu;
 968    PageDesc *p;
 969    unsigned int h, n1;
 970    tb_page_addr_t phys_pc;
 971    TranslationBlock *tb1, *tb2;
 972
 973    /* remove the TB from the hash list */
 974    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
 975    h = tb_phys_hash_func(phys_pc);
 976    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);
 977
 978    /* remove the TB from the page list */
 979    if (tb->page_addr[0] != page_addr) {
 980        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
 981        tb_page_remove(&p->first_tb, tb);
 982        invalidate_page_bitmap(p);
 983    }
 984    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
 985        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
 986        tb_page_remove(&p->first_tb, tb);
 987        invalidate_page_bitmap(p);
 988    }
 989
 990    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
 991
 992    /* remove the TB from the hash list */
 993    h = tb_jmp_cache_hash_func(tb->pc);
 994    CPU_FOREACH(cpu) {
 995        if (cpu->tb_jmp_cache[h] == tb) {
 996            cpu->tb_jmp_cache[h] = NULL;
 997        }
 998    }
 999
1000    /* suppress this TB from the two jump lists */
1001    tb_jmp_remove(tb, 0);
1002    tb_jmp_remove(tb, 1);
1003
1004    /* suppress any remaining jumps to this TB */
1005    tb1 = tb->jmp_first;
1006    for (;;) {
1007        n1 = (uintptr_t)tb1 & 3;
1008        if (n1 == 2) {
1009            break;
1010        }
1011        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1012        tb2 = tb1->jmp_next[n1];
1013        tb_reset_jump(tb1, n1);
1014        tb1->jmp_next[n1] = NULL;
1015        tb1 = tb2;
1016    }
1017    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
1018
1019    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
1020}
1021
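/* Build a bitmap with one bit per byte of the guest page, marking the bytes
   covered by translated code.  tb_invalidate_phys_page_fast() consults this
   bitmap so that guest writes which touch no translated byte can skip the
   expensive TB invalidation path.  */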
1022static void build_page_bitmap(PageDesc *p)
1023{
1024    int n, tb_start, tb_end;
1025    TranslationBlock *tb;
1026
1027    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1028
1029    tb = p->first_tb;
1030    while (tb != NULL) {
1031        n = (uintptr_t)tb & 3;
1032        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1033        /* NOTE: this is subtle as a TB may span two physical pages */
1034        if (n == 0) {
1035            /* NOTE: tb_end may be after the end of the page, but
1036               it is not a problem */
1037            tb_start = tb->pc & ~TARGET_PAGE_MASK;
1038            tb_end = tb_start + tb->size;
1039            if (tb_end > TARGET_PAGE_SIZE) {
1040                tb_end = TARGET_PAGE_SIZE;
1041            }
1042        } else {
1043            tb_start = 0;
1044            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1045        }
1046        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1047        tb = tb->page_next[n];
1048    }
1049}
1050
1051/* Called with mmap_lock held for user mode emulation.  */
1052TranslationBlock *tb_gen_code(CPUState *cpu,
1053                              target_ulong pc, target_ulong cs_base,
1054                              int flags, int cflags)
1055{
1056    CPUArchState *env = cpu->env_ptr;
1057    TranslationBlock *tb;
1058    tb_page_addr_t phys_pc, phys_page2;
1059    target_ulong virt_page2;
1060    tcg_insn_unit *gen_code_buf;
1061    int gen_code_size, search_size;
1062#ifdef CONFIG_PROFILER
1063    int64_t ti;
1064#endif
1065
1066    phys_pc = get_page_addr_code(env, pc);
1067    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
1068        cflags |= CF_USE_ICOUNT;
1069    }
1070
1071    tb = tb_alloc(pc);
1072    if (unlikely(!tb)) {
1073 buffer_overflow:
1074        /* flush must be done */
1075        tb_flush(cpu);
1076        /* cannot fail at this point */
1077        tb = tb_alloc(pc);
1078        assert(tb != NULL);
1079        /* Don't forget to invalidate previous TB info.  */
1080        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
1081    }
1082
1083    gen_code_buf = tcg_ctx.code_gen_ptr;
1084    tb->tc_ptr = gen_code_buf;
1085    tb->cs_base = cs_base;
1086    tb->flags = flags;
1087    tb->cflags = cflags;
1088
1089#ifdef CONFIG_PROFILER
1090    tcg_ctx.tb_count1++; /* includes aborted translations because of
1091                       exceptions */
1092    ti = profile_getclock();
1093#endif
1094
1095    tcg_func_start(&tcg_ctx);
1096
1097    gen_intermediate_code(env, tb);
1098
1099    trace_translate_block(tb, tb->pc, tb->tc_ptr);
1100
1101    /* generate machine code */
1102    tb->tb_next_offset[0] = 0xffff;
1103    tb->tb_next_offset[1] = 0xffff;
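    /* 0xffff is a sentinel meaning "no direct jump emitted for this slot";
       the code generator overwrites it when it emits a goto_tb, and
       tb_link_page() only resets jumps whose offset was actually filled in. */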
1104    tcg_ctx.tb_next_offset = tb->tb_next_offset;
1105#ifdef USE_DIRECT_JUMP
1106    tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset;
1107    tcg_ctx.tb_next = NULL;
1108#else
1109    tcg_ctx.tb_jmp_offset = NULL;
1110    tcg_ctx.tb_next = tb->tb_next;
1111#endif
1112
1113#ifdef CONFIG_PROFILER
1114    tcg_ctx.tb_count++;
1115    tcg_ctx.interm_time += profile_getclock() - ti;
1116    tcg_ctx.code_time -= profile_getclock();
1117#endif
1118
1119    /* ??? Overflow could be handled better here.  In particular, we
1120       don't need to re-do gen_intermediate_code, nor should we re-do
1121       the tcg optimization currently hidden inside tcg_gen_code.  All
1122       that should be required is to flush the TBs, allocate a new TB,
1123       re-initialize it per above, and re-do the actual code generation.  */
1124    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
1125    if (unlikely(gen_code_size < 0)) {
1126        goto buffer_overflow;
1127    }
1128    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1129    if (unlikely(search_size < 0)) {
1130        goto buffer_overflow;
1131    }
1132
1133#ifdef CONFIG_PROFILER
1134    tcg_ctx.code_time += profile_getclock();
1135    tcg_ctx.code_in_len += tb->size;
1136    tcg_ctx.code_out_len += gen_code_size;
1137    tcg_ctx.search_out_len += search_size;
1138#endif
1139
1140#ifdef DEBUG_DISAS
1141    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1142        qemu_log_in_addr_range(tb->pc)) {
1143        qemu_log("OUT: [size=%d]\n", gen_code_size);
1144        log_disas(tb->tc_ptr, gen_code_size);
1145        qemu_log("\n");
1146        qemu_log_flush();
1147    }
1148#endif
1149
1150    tcg_ctx.code_gen_ptr = (void *)
1151        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1152                 CODE_GEN_ALIGN);
1153
1154    /* check next page if needed */
1155    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1156    phys_page2 = -1;
1157    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1158        phys_page2 = get_page_addr_code(env, virt_page2);
1159    }
1160    tb_link_page(tb, phys_pc, phys_page2);
1161    return tb;
1162}
1163
1164/*
1165 * Invalidate all TBs which intersect with the target physical address range
1166 * [start, end). NOTE: start and end may refer to *different* physical pages.
1167 * When called because of a real cpu write access, the virtual CPU will exit
1168 * the current TB if code is modified inside this TB, since the translated
1169 * code is no longer valid.
1170 *
1171 * Called with mmap_lock held for user-mode emulation
1172 */
1173void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1174{
1175    while (start < end) {
1176        tb_invalidate_phys_page_range(start, end, 0);
1177        start &= TARGET_PAGE_MASK;
1178        start += TARGET_PAGE_SIZE;
1179    }
1180}
1181
1182/*
1183 * Invalidate all TBs which intersect with the target physical address range
1184 * [start, end). NOTE: start and end must refer to the *same* physical page.
1185 * 'is_cpu_write_access' should be true if called from a real cpu write
1186 * access: the virtual CPU will exit the current TB if code is modified inside
1187 * this TB.
1188 *
1189 * Called with mmap_lock held for user-mode emulation
1190 */
1191void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1192                                   int is_cpu_write_access)
1193{
1194    TranslationBlock *tb, *tb_next, *saved_tb;
1195    CPUState *cpu = current_cpu;
1196#if defined(TARGET_HAS_PRECISE_SMC)
1197    CPUArchState *env = NULL;
1198#endif
1199    tb_page_addr_t tb_start, tb_end;
1200    PageDesc *p;
1201    int n;
1202#ifdef TARGET_HAS_PRECISE_SMC
1203    int current_tb_not_found = is_cpu_write_access;
1204    TranslationBlock *current_tb = NULL;
1205    int current_tb_modified = 0;
1206    target_ulong current_pc = 0;
1207    target_ulong current_cs_base = 0;
1208    int current_flags = 0;
1209#endif /* TARGET_HAS_PRECISE_SMC */
1210
1211    p = page_find(start >> TARGET_PAGE_BITS);
1212    if (!p) {
1213        return;
1214    }
1215#if defined(TARGET_HAS_PRECISE_SMC)
1216    if (cpu != NULL) {
1217        env = cpu->env_ptr;
1218    }
1219#endif
1220
1221    /* we remove all the TBs in the range [start, end[ */
1222    /* XXX: see if in some cases it could be faster to invalidate all
1223       the code */
1224    tb = p->first_tb;
1225    while (tb != NULL) {
1226        n = (uintptr_t)tb & 3;
1227        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1228        tb_next = tb->page_next[n];
1229        /* NOTE: this is subtle as a TB may span two physical pages */
1230        if (n == 0) {
1231            /* NOTE: tb_end may be after the end of the page, but
1232               it is not a problem */
1233            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1234            tb_end = tb_start + tb->size;
1235        } else {
1236            tb_start = tb->page_addr[1];
1237            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1238        }
1239        if (!(tb_end <= start || tb_start >= end)) {
1240#ifdef TARGET_HAS_PRECISE_SMC
1241            if (current_tb_not_found) {
1242                current_tb_not_found = 0;
1243                current_tb = NULL;
1244                if (cpu->mem_io_pc) {
1245                    /* now we have a real cpu fault */
1246                    current_tb = tb_find_pc(cpu->mem_io_pc);
1247                }
1248            }
1249            if (current_tb == tb &&
1250                (current_tb->cflags & CF_COUNT_MASK) != 1) {
1251                /* If we are modifying the current TB, we must stop
1252                its execution. We could be more precise by checking
1253                that the modification is after the current PC, but it
1254                would require a specialized function to partially
1255                restore the CPU state */
1256
1257                current_tb_modified = 1;
1258                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
1259                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1260                                     &current_flags);
1261            }
1262#endif /* TARGET_HAS_PRECISE_SMC */
1263            /* we need to do that to handle the case where a signal
1264               occurs while doing tb_phys_invalidate() */
1265            saved_tb = NULL;
1266            if (cpu != NULL) {
1267                saved_tb = cpu->current_tb;
1268                cpu->current_tb = NULL;
1269            }
1270            tb_phys_invalidate(tb, -1);
1271            if (cpu != NULL) {
1272                cpu->current_tb = saved_tb;
1273                if (cpu->interrupt_request && cpu->current_tb) {
1274                    cpu_interrupt(cpu, cpu->interrupt_request);
1275                }
1276            }
1277        }
1278        tb = tb_next;
1279    }
1280#if !defined(CONFIG_USER_ONLY)
1281    /* if no code remaining, no need to continue to use slow writes */
1282    if (!p->first_tb) {
1283        invalidate_page_bitmap(p);
1284        tlb_unprotect_code(start);
1285    }
1286#endif
1287#ifdef TARGET_HAS_PRECISE_SMC
1288    if (current_tb_modified) {
1289        /* we generate a block containing just the instruction
1290           modifying the memory. It will ensure that it cannot modify
1291           itself */
1292        cpu->current_tb = NULL;
1293        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1294        cpu_resume_from_signal(cpu, NULL);
1295    }
1296#endif
1297}
1298
1299/* len must be <= 8 and start must be a multiple of len */
1300void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1301{
1302    PageDesc *p;
1303
1304#if 0
1305    if (1) {
1306        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1307                  cpu_single_env->mem_io_vaddr, len,
1308                  cpu_single_env->eip,
1309                  cpu_single_env->eip +
1310                  (intptr_t)cpu_single_env->segs[R_CS].base);
1311    }
1312#endif
1313    p = page_find(start >> TARGET_PAGE_BITS);
1314    if (!p) {
1315        return;
1316    }
1317    if (!p->code_bitmap &&
1318        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1319        /* build code bitmap */
1320        build_page_bitmap(p);
1321    }
1322    if (p->code_bitmap) {
1323        unsigned int nr;
1324        unsigned long b;
1325
1326        nr = start & ~TARGET_PAGE_MASK;
1327        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
1328        if (b & ((1 << len) - 1)) {
1329            goto do_invalidate;
1330        }
1331    } else {
1332    do_invalidate:
1333        tb_invalidate_phys_page_range(start, start + len, 1);
1334    }
1335}
1336
1337#if !defined(CONFIG_SOFTMMU)
1338/* Called with mmap_lock held.  */
1339static void tb_invalidate_phys_page(tb_page_addr_t addr,
1340                                    uintptr_t pc, void *puc,
1341                                    bool locked)
1342{
1343    TranslationBlock *tb;
1344    PageDesc *p;
1345    int n;
1346#ifdef TARGET_HAS_PRECISE_SMC
1347    TranslationBlock *current_tb = NULL;
1348    CPUState *cpu = current_cpu;
1349    CPUArchState *env = NULL;
1350    int current_tb_modified = 0;
1351    target_ulong current_pc = 0;
1352    target_ulong current_cs_base = 0;
1353    int current_flags = 0;
1354#endif
1355
1356    addr &= TARGET_PAGE_MASK;
1357    p = page_find(addr >> TARGET_PAGE_BITS);
1358    if (!p) {
1359        return;
1360    }
1361    tb = p->first_tb;
1362#ifdef TARGET_HAS_PRECISE_SMC
1363    if (tb && pc != 0) {
1364        current_tb = tb_find_pc(pc);
1365    }
1366    if (cpu != NULL) {
1367        env = cpu->env_ptr;
1368    }
1369#endif
1370    while (tb != NULL) {
1371        n = (uintptr_t)tb & 3;
1372        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1373#ifdef TARGET_HAS_PRECISE_SMC
1374        if (current_tb == tb &&
1375            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1376                /* If we are modifying the current TB, we must stop
1377                   its execution. We could be more precise by checking
1378                   that the modification is after the current PC, but it
1379                   would require a specialized function to partially
1380                   restore the CPU state */
1381
1382            current_tb_modified = 1;
1383            cpu_restore_state_from_tb(cpu, current_tb, pc);
1384            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1385                                 &current_flags);
1386        }
1387#endif /* TARGET_HAS_PRECISE_SMC */
1388        tb_phys_invalidate(tb, addr);
1389        tb = tb->page_next[n];
1390    }
1391    p->first_tb = NULL;
1392#ifdef TARGET_HAS_PRECISE_SMC
1393    if (current_tb_modified) {
1394        /* we generate a block containing just the instruction
1395           modifying the memory. It will ensure that it cannot modify
1396           itself */
1397        cpu->current_tb = NULL;
1398        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1399        if (locked) {
1400            mmap_unlock();
1401        }
1402        cpu_resume_from_signal(cpu, puc);
1403    }
1404#endif
1405}
1406#endif
1407
1408/* add the tb in the target page and protect it if necessary
1409 *
1410 * Called with mmap_lock held for user-mode emulation.
1411 */
1412static inline void tb_alloc_page(TranslationBlock *tb,
1413                                 unsigned int n, tb_page_addr_t page_addr)
1414{
1415    PageDesc *p;
1416#ifndef CONFIG_USER_ONLY
1417    bool page_already_protected;
1418#endif
1419
1420    tb->page_addr[n] = page_addr;
1421    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1422    tb->page_next[n] = p->first_tb;
1423#ifndef CONFIG_USER_ONLY
1424    page_already_protected = p->first_tb != NULL;
1425#endif
1426    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1427    invalidate_page_bitmap(p);
1428
1429#if defined(CONFIG_USER_ONLY)
1430    if (p->flags & PAGE_WRITE) {
1431        target_ulong addr;
1432        PageDesc *p2;
1433        int prot;
1434
1435        /* force the host page as non writable (writes will have a
1436           page fault + mprotect overhead) */
1437        page_addr &= qemu_host_page_mask;
1438        prot = 0;
1439        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1440            addr += TARGET_PAGE_SIZE) {
1441
1442            p2 = page_find(addr >> TARGET_PAGE_BITS);
1443            if (!p2) {
1444                continue;
1445            }
1446            prot |= p2->flags;
1447            p2->flags &= ~PAGE_WRITE;
1448        }
1449        mprotect(g2h(page_addr), qemu_host_page_size,
1450                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1451#ifdef DEBUG_TB_INVALIDATE
1452        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1453               page_addr);
1454#endif
1455    }
1456#else
1457    /* if some code is already present, then the pages are already
1458       protected. So we handle the case where only the first TB is
1459       allocated in a physical page */
1460    if (!page_already_protected) {
1461        tlb_protect_code(page_addr);
1462    }
1463#endif
1464}
1465
1466/* add a new TB and link it to the physical page tables. phys_page2 is
1467 * (-1) to indicate that only one page contains the TB.
1468 *
1469 * Called with mmap_lock held for user-mode emulation.
1470 */
1471static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1472                         tb_page_addr_t phys_page2)
1473{
1474    unsigned int h;
1475    TranslationBlock **ptb;
1476
1477    /* add in the physical hash table */
1478    h = tb_phys_hash_func(phys_pc);
1479    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
1480    tb->phys_hash_next = *ptb;
1481    *ptb = tb;
1482
1483    /* add in the page list */
1484    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1485    if (phys_page2 != -1) {
1486        tb_alloc_page(tb, 1, phys_page2);
1487    } else {
1488        tb->page_addr[1] = -1;
1489    }
1490
1491    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
1492    tb->jmp_next[0] = NULL;
1493    tb->jmp_next[1] = NULL;
1494
1495    /* init original jump addresses */
1496    if (tb->tb_next_offset[0] != 0xffff) {
1497        tb_reset_jump(tb, 0);
1498    }
1499    if (tb->tb_next_offset[1] != 0xffff) {
1500        tb_reset_jump(tb, 1);
1501    }
1502
1503#ifdef DEBUG_TB_CHECK
1504    tb_page_check();
1505#endif
1506}
1507
1508/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1509   tb[1].tc_ptr. Return NULL if not found */
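/* This relies on TBs being allocated in tbs[] in the same order in which
   their host code is carved out of code_gen_buffer, so the array is sorted
   by tc_ptr and a binary search can locate the enclosing TB.  */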
1510static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1511{
1512    int m_min, m_max, m;
1513    uintptr_t v;
1514    TranslationBlock *tb;
1515
1516    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1517        return NULL;
1518    }
1519    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1520        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1521        return NULL;
1522    }
1523    /* binary search (cf Knuth) */
1524    m_min = 0;
1525    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1526    while (m_min <= m_max) {
1527        m = (m_min + m_max) >> 1;
1528        tb = &tcg_ctx.tb_ctx.tbs[m];
1529        v = (uintptr_t)tb->tc_ptr;
1530        if (v == tc_ptr) {
1531            return tb;
1532        } else if (tc_ptr < v) {
1533            m_max = m - 1;
1534        } else {
1535            m_min = m + 1;
1536        }
1537    }
1538    return &tcg_ctx.tb_ctx.tbs[m_max];
1539}
1540
1541#if !defined(CONFIG_USER_ONLY)
1542void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
1543{
1544    ram_addr_t ram_addr;
1545    MemoryRegion *mr;
1546    hwaddr l = 1;
1547
1548    rcu_read_lock();
1549    mr = address_space_translate(as, addr, &addr, &l, false);
1550    if (!(memory_region_is_ram(mr)
1551          || memory_region_is_romd(mr))) {
1552        rcu_read_unlock();
1553        return;
1554    }
1555    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
1556        + addr;
1557    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1558    rcu_read_unlock();
1559}
1560#endif /* !defined(CONFIG_USER_ONLY) */
1561
1562void tb_check_watchpoint(CPUState *cpu)
1563{
1564    TranslationBlock *tb;
1565
1566    tb = tb_find_pc(cpu->mem_io_pc);
1567    if (tb) {
1568        /* We can use retranslation to find the PC.  */
1569        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1570        tb_phys_invalidate(tb, -1);
1571    } else {
1572        /* The exception probably happened in a helper.  The CPU state should
1573           have been saved before calling it. Fetch the PC from there.  */
1574        CPUArchState *env = cpu->env_ptr;
1575        target_ulong pc, cs_base;
1576        tb_page_addr_t addr;
1577        int flags;
1578
1579        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1580        addr = get_page_addr_code(env, pc);
1581        tb_invalidate_phys_range(addr, addr + 1);
1582    }
1583}
1584
1585#ifndef CONFIG_USER_ONLY
1586/* in deterministic execution mode, instructions doing device I/Os
1587   must be at the end of the TB */
1588void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
1589{
1590#if defined(TARGET_MIPS) || defined(TARGET_SH4)
1591    CPUArchState *env = cpu->env_ptr;
1592#endif
1593    TranslationBlock *tb;
1594    uint32_t n, cflags;
1595    target_ulong pc, cs_base;
1596    uint64_t flags;
1597
1598    tb = tb_find_pc(retaddr);
1599    if (!tb) {
1600        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
1601                  (void *)retaddr);
1602    }
1603    n = cpu->icount_decr.u16.low + tb->icount;
1604    cpu_restore_state_from_tb(cpu, tb, retaddr);
1605    /* Calculate how many instructions had been executed before the fault
1606       occurred.  */
1607    n = n - cpu->icount_decr.u16.low;
1608    /* Generate a new TB ending on the I/O insn.  */
1609    n++;
1610    /* On MIPS and SH, delay slot instructions can only be restarted if
1611       they were already the first instruction in the TB.  If this is not
1612       the first instruction in a TB then re-execute the preceding
1613       branch.  */
1614#if defined(TARGET_MIPS)
1615    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
1616        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
1617        cpu->icount_decr.u16.low++;
1618        env->hflags &= ~MIPS_HFLAG_BMASK;
1619    }
1620#elif defined(TARGET_SH4)
1621    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1622            && n > 1) {
1623        env->pc -= 2;
1624        cpu->icount_decr.u16.low++;
1625        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1626    }
1627#endif
1628    /* This should never happen.  */
1629    if (n > CF_COUNT_MASK) {
1630        cpu_abort(cpu, "TB too big during recompile");
1631    }
1632
1633    cflags = n | CF_LAST_IO;
1634    pc = tb->pc;
1635    cs_base = tb->cs_base;
1636    flags = tb->flags;
1637    tb_phys_invalidate(tb, -1);
1638    if (tb->cflags & CF_NOCACHE) {
1639        if (tb->orig_tb) {
1640            /* Invalidate original TB if this TB was generated in
1641             * cpu_exec_nocache() */
1642            tb_phys_invalidate(tb->orig_tb, -1);
1643        }
1644        tb_free(tb);
1645    }
1646    /* FIXME: In theory this could raise an exception.  In practice
1647       we have already translated the block once so it's probably ok.  */
1648    tb_gen_code(cpu, pc, cs_base, flags, cflags);
1649    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1650       the first in the TB) then we end up generating a whole new TB and
1651       repeating the fault, which is horribly inefficient.
1652       Better would be to execute just this insn uncached, or generate a
1653       second new TB.  */
1654    cpu_resume_from_signal(cpu, NULL);
1655}
1656
1657void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1658{
1659    unsigned int i;
1660
1661    /* Discard jump cache entries for any tb which might potentially
1662       overlap the flushed page.  */
1663    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1664    memset(&cpu->tb_jmp_cache[i], 0,
1665           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1666
1667    i = tb_jmp_cache_hash_page(addr);
1668    memset(&cpu->tb_jmp_cache[i], 0,
1669           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1670}
1671
1672void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1673{
1674    int i, target_code_size, max_target_code_size;
1675    int direct_jmp_count, direct_jmp2_count, cross_page;
1676    TranslationBlock *tb;
1677
1678    target_code_size = 0;
1679    max_target_code_size = 0;
1680    cross_page = 0;
1681    direct_jmp_count = 0;
1682    direct_jmp2_count = 0;
1683    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1684        tb = &tcg_ctx.tb_ctx.tbs[i];
1685        target_code_size += tb->size;
1686        if (tb->size > max_target_code_size) {
1687            max_target_code_size = tb->size;
1688        }
1689        if (tb->page_addr[1] != -1) {
1690            cross_page++;
1691        }
1692        if (tb->tb_next_offset[0] != 0xffff) {
1693            direct_jmp_count++;
1694            if (tb->tb_next_offset[1] != 0xffff) {
1695                direct_jmp2_count++;
1696            }
1697        }
1698    }
1699    /* XXX: avoid using doubles ? */
1700    cpu_fprintf(f, "Translation buffer state:\n");
1701    cpu_fprintf(f, "gen code size       %td/%zd\n",
1702                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
1703                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
1704    cpu_fprintf(f, "TB count            %d/%d\n",
1705            tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
1706    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
1707            tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1708                    tcg_ctx.tb_ctx.nb_tbs : 0,
1709            max_target_code_size);
1710    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
1711            tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1712                                     tcg_ctx.code_gen_buffer) /
1713                                     tcg_ctx.tb_ctx.nb_tbs : 0,
1714                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1715                                             tcg_ctx.code_gen_buffer) /
1716                                             target_code_size : 0);
1717    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1718            tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1719                                    tcg_ctx.tb_ctx.nb_tbs : 0);
1720    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
1721                direct_jmp_count,
1722                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1723                        tcg_ctx.tb_ctx.nb_tbs : 0,
1724                direct_jmp2_count,
1725                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1726                        tcg_ctx.tb_ctx.nb_tbs : 0);
1727    cpu_fprintf(f, "\nStatistics:\n");
1728    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
1729    cpu_fprintf(f, "TB invalidate count %d\n",
1730            tcg_ctx.tb_ctx.tb_phys_invalidate_count);
1731    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
1732    tcg_dump_info(f, cpu_fprintf);
1733}
1734
1735void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
1736{
1737    tcg_dump_op_count(f, cpu_fprintf);
1738}
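
/*
 * Both dump functions above are normally reached from the HMP monitor
 * ("info jit" and "info opcount" respectively); the cpu_fprintf/FILE
 * indirection is what lets the same code print to the monitor stream.
 */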
1739
1740#else /* CONFIG_USER_ONLY */
1741
1742void cpu_interrupt(CPUState *cpu, int mask)
1743{
1744    cpu->interrupt_request |= mask;
1745    cpu->tcg_exit_req = 1;
1746}
1747
1748/*
1749 * Walks guest process memory "regions" one by one
1750 * and calls callback function 'fn' for each region.
1751 */
1752struct walk_memory_regions_data {
1753    walk_memory_regions_fn fn;
1754    void *priv;
1755    target_ulong start;
1756    int prot;
1757};
1758
1759static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1760                                   target_ulong end, int new_prot)
1761{
1762    if (data->start != -1u) {
1763        int rc = data->fn(data->priv, data->start, end, data->prot);
1764        if (rc != 0) {
1765            return rc;
1766        }
1767    }
1768
1769    data->start = (new_prot ? end : -1u);
1770    data->prot = new_prot;
1771
1772    return 0;
1773}
1774
1775static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1776                                 target_ulong base, int level, void **lp)
1777{
1778    target_ulong pa;
1779    int i, rc;
1780
1781    if (*lp == NULL) {
1782        return walk_memory_regions_end(data, base, 0);
1783    }
1784
1785    if (level == 0) {
1786        PageDesc *pd = *lp;
1787
1788        for (i = 0; i < V_L2_SIZE; ++i) {
1789            int prot = pd[i].flags;
1790
1791            pa = base | (i << TARGET_PAGE_BITS);
1792            if (prot != data->prot) {
1793                rc = walk_memory_regions_end(data, pa, prot);
1794                if (rc != 0) {
1795                    return rc;
1796                }
1797            }
1798        }
1799    } else {
1800        void **pp = *lp;
1801
1802        for (i = 0; i < V_L2_SIZE; ++i) {
1803            pa = base | ((target_ulong)i <<
1804                (TARGET_PAGE_BITS + V_L2_BITS * level));
1805            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1806            if (rc != 0) {
1807                return rc;
1808            }
1809        }
1810    }
1811
1812    return 0;
1813}
1814
1815int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1816{
1817    struct walk_memory_regions_data data;
1818    uintptr_t i;
1819
1820    data.fn = fn;
1821    data.priv = priv;
1822    data.start = -1u;
1823    data.prot = 0;
1824
1825    for (i = 0; i < V_L1_SIZE; i++) {
1826        int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
1827                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
1828        if (rc != 0) {
1829            return rc;
1830        }
1831    }
1832
1833    return walk_memory_regions_end(&data, 0, 0);
1834}
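
/*
 * Illustrative sketch (hypothetical helper, not part of QEMU): a callback
 * for walk_memory_regions() that sums the bytes of all executable guest
 * regions, assuming the same callback signature as dump_region() below.
 * Usage:  target_ulong total = 0; walk_memory_regions(&total, count_exec_bytes);
 */
static int count_exec_bytes(void *priv, target_ulong start,
                            target_ulong end, unsigned long prot)
{
    target_ulong *total = priv;

    if (prot & PAGE_EXEC) {
        *total += end - start;
    }
    return 0;   /* returning non-zero would abort the walk early */
}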
1835
1836static int dump_region(void *priv, target_ulong start,
1837                       target_ulong end, unsigned long prot)
1838{
1839    FILE *f = (FILE *)priv;
1840
1841    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
1842        " "TARGET_FMT_lx" %c%c%c\n",
1843        start, end, end - start,
1844        ((prot & PAGE_READ) ? 'r' : '-'),
1845        ((prot & PAGE_WRITE) ? 'w' : '-'),
1846        ((prot & PAGE_EXEC) ? 'x' : '-'));
1847
1848    return 0;
1849}
1850
1851/* dump memory mappings */
1852void page_dump(FILE *f)
1853{
1854    const int length = sizeof(target_ulong) * 2;
1855    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
1856            length, "start", length, "end", length, "size", "prot");
1857    walk_memory_regions(f, dump_region);
1858}
1859
1860int page_get_flags(target_ulong address)
1861{
1862    PageDesc *p;
1863
1864    p = page_find(address >> TARGET_PAGE_BITS);
1865    if (!p) {
1866        return 0;
1867    }
1868    return p->flags;
1869}
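
/*
 * Illustrative sketch (hypothetical helper, not part of QEMU): combine
 * page_get_flags() with the standard protection bits to check that a guest
 * page is both mapped and executable.
 */
static inline bool guest_page_executable(target_ulong addr)
{
    int flags = page_get_flags(addr);

    return (flags & PAGE_VALID) && (flags & PAGE_EXEC);
}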
1870
1871/* Modify the flags of a page and invalidate the code if necessary.
1872   The flag PAGE_WRITE_ORG is set automatically whenever PAGE_WRITE is
1873   requested.  The mmap_lock should already be held.  */
1874void page_set_flags(target_ulong start, target_ulong end, int flags)
1875{
1876    target_ulong addr, len;
1877
1878    /* This function should never be called with addresses outside the
1879       guest address space.  If this assert fires, it probably indicates
1880       a missing call to h2g_valid.  */
1881#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1882    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
1883#endif
1884    assert(start < end);
1885
1886    start = start & TARGET_PAGE_MASK;
1887    end = TARGET_PAGE_ALIGN(end);
1888
1889    if (flags & PAGE_WRITE) {
1890        flags |= PAGE_WRITE_ORG;
1891    }
1892
1893    for (addr = start, len = end - start;
1894         len != 0;
1895         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1896        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1897
1898        /* If a previously non-writable page that holds TBs is being
1899           made writable, invalidate the translated code in it first.  */
1900        if (!(p->flags & PAGE_WRITE) &&
1901            (flags & PAGE_WRITE) &&
1902            p->first_tb) {
1903            tb_invalidate_phys_page(addr, 0, NULL, false);
1904        }
1905        p->flags = flags;
1906    }
1907}
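
/*
 * Illustrative sketch (hypothetical helper, not part of QEMU): how an
 * emulated anonymous read/write mmap() could be recorded.  PAGE_WRITE_ORG
 * is added by page_set_flags() itself because PAGE_WRITE is requested; the
 * caller is expected to hold the mmap_lock.
 */
static void record_anon_mapping(target_ulong start, target_ulong len)
{
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}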
1908
1909int page_check_range(target_ulong start, target_ulong len, int flags)
1910{
1911    PageDesc *p;
1912    target_ulong end;
1913    target_ulong addr;
1914
1915    /* This function should never be called with addresses outside the
1916       guest address space.  If this assert fires, it probably indicates
1917       a missing call to h2g_valid.  */
1918#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1919    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
1920#endif
1921
1922    if (len == 0) {
1923        return 0;
1924    }
1925    if (start + len - 1 < start) {
1926        /* We've wrapped around.  */
1927        return -1;
1928    }
1929
1930    /* must be done before we lose bits in the next step */
1931    end = TARGET_PAGE_ALIGN(start + len);
1932    start = start & TARGET_PAGE_MASK;
1933
1934    for (addr = start, len = end - start;
1935         len != 0;
1936         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1937        p = page_find(addr >> TARGET_PAGE_BITS);
1938        if (!p) {
1939            return -1;
1940        }
1941        if (!(p->flags & PAGE_VALID)) {
1942            return -1;
1943        }
1944
1945        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
1946            return -1;
1947        }
1948        if (flags & PAGE_WRITE) {
1949            if (!(p->flags & PAGE_WRITE_ORG)) {
1950                return -1;
1951            }
1952            /* unprotect the page if it was made read-only because it
1953               contains translated code */
1954            if (!(p->flags & PAGE_WRITE)) {
1955                if (!page_unprotect(addr, 0, NULL)) {
1956                    return -1;
1957                }
1958            }
1959        }
1960    }
1961    return 0;
1962}
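
/*
 * Illustrative sketch (hypothetical helper, in the spirit of linux-user's
 * access_ok()): validate a guest buffer before copying data out of it.
 * page_check_range() returns 0 on success and -1 on failure, as above.
 */
static bool guest_range_readable(target_ulong addr, target_ulong len)
{
    return page_check_range(addr, len, PAGE_READ) == 0;
}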
1963
1964/* called from signal handler: invalidate the code and unprotect the
1965   page. Return TRUE if the fault was successfully handled. */
1966int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
1967{
1968    unsigned int prot;
1969    PageDesc *p;
1970    target_ulong host_start, host_end, addr;
1971
1972    /* Technically this isn't safe inside a signal handler.  However we
1973       know this only ever happens in a synchronous SEGV handler, so in
1974       practice it seems to be ok.  */
1975    mmap_lock();
1976
1977    p = page_find(address >> TARGET_PAGE_BITS);
1978    if (!p) {
1979        mmap_unlock();
1980        return 0;
1981    }
1982
1983    /* If the page was originally writable (PAGE_WRITE_ORG) but has been
1984       write-protected to track translated code, restore write access. */
1985    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
1986        host_start = address & qemu_host_page_mask;
1987        host_end = host_start + qemu_host_page_size;
1988
1989        prot = 0;
1990        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
1991            p = page_find(addr >> TARGET_PAGE_BITS);
1992            p->flags |= PAGE_WRITE;
1993            prot |= p->flags;
1994
1995            /* and since the content will be modified, we must invalidate
1996               the corresponding translated code. */
1997            tb_invalidate_phys_page(addr, pc, puc, true);
1998#ifdef DEBUG_TB_CHECK
1999            tb_invalidate_check(addr);
2000#endif
2001        }
2002        mprotect((void *)g2h(host_start), qemu_host_page_size,
2003                 prot & PAGE_BITS);
2004
2005        mmap_unlock();
2006        return 1;
2007    }
2008    mmap_unlock();
2009    return 0;
2010}
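
/*
 * Typical caller (sketch): the user-mode SEGV handler converts the host
 * fault address with h2g() and tries page_unprotect() first; a return of 1
 * means the page only looked read-only because it held translated code, so
 * the faulting write can simply be restarted.  Otherwise the signal is
 * forwarded to the guest.
 */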
2011#endif /* CONFIG_USER_ONLY */
2012