qemu/include/exec/exec-all.h
/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#include "exec/tb-context.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#endif
#include "sysemu/cpus.h"

/* Allow seeing translation results - the slowdown should be negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type.  */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @searched_pc is not in translated code, no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
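
/*
 * Illustrative sketch (not part of this header): a target's fault handling
 * path typically restores state from the host return address before raising
 * the guest exception, e.g. in a tlb_fill or unaligned-access hook:
 *
 *     cpu_restore_state(cs, retaddr, true);
 *     cpu_loop_exit(cs);
 *
 * where "retaddr" is the host PC captured with GETPC() in the helper.
 */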

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)atomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}
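
/*
 * Illustrative sketch (not part of this header): a helper implementing an
 * interruptible, element-by-element instruction might check this once per
 * iteration, after all architectural state has been written back.
 * "do_one_element" is hypothetical:
 *
 *     for (i = 0; i < len; i++) {
 *         do_one_element(env, i);
 *         if (cpu_loop_exit_requested(cs)) {
 *             cpu_loop_exit_restore(cs, GETPC());
 *         }
 *     }
 */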

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
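
/*
 * Illustrative sketch (not part of this header): a target CPU with a
 * secure and a non-secure view might register two address spaces from
 * its realize function.  The names and "secure_mr" are hypothetical:
 *
 *     cs->num_ases = 2;
 *     cpu_address_space_init(cs, 0, "cpu-memory", get_system_memory());
 *     cpu_address_space_init(cs, 1, "cpu-secure-memory", secure_mr);
 */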
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's flush is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB for all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's flush is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
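
/*
 * Illustrative sketch (not part of this header): @idxmap is a bitmap of MMU
 * indexes, so flushing a page from two indexes ORs their bits together.
 * "MMU_USER_IDX" and "MMU_KERNEL_IDX" are hypothetical target-defined names:
 *
 *     tlb_flush_page_by_mmuidx(cs, addr,
 *                              (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX));
 */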
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's flush is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified MMU
 * indexes, like tlb_flush_by_mmuidx_all_cpus, except that the source
 * vCPU's flush is scheduled as safe work, meaning all flushes will be
 * complete once the source vCPU's safe work is complete. This will
 * depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
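
/*
 * Illustrative sketch (not part of this header): a target's tlb_fill hook
 * might end a successful page table walk like this.  "walk_result" and its
 * fields are hypothetical:
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             walk_result.paddr & TARGET_PAGE_MASK,
 *                             walk_result.attrs, walk_result.prot,
 *                             mmu_idx, TARGET_PAGE_SIZE);
 */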
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
#endif
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}
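
/*
 * Illustrative sketch (not part of this header): a helper that must either
 * complete a multi-byte store in full or fault before writing anything can
 * probe the destination first.  "dest_addr" and the size are hypothetical:
 *
 *     void *host = probe_write(env, dest_addr, 16, mmu_idx, GETPC());
 *
 * Any fault is raised before anything is written; when the returned host
 * pointer is non-NULL the bytes may also be written directly to host memory.
 */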

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};
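
/*
 * Illustrative sketch (not part of this header): per the note above, the
 * search data stored after the translated code is located as:
 *
 *     void *search_data = (char *)tb->tc.ptr + tb->tc.size;
 */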

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two of such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};
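
/*
 * Illustrative sketch (not part of this header): the jump lists use tagged
 * pointers, so traversing jmp_list_next recovers both the next TB and which
 * of its two list entries continues the chain:
 *
 *     uintptr_t ent = tb->jmp_list_next[n];
 *     TranslationBlock *next = (TranslationBlock *)(ent & ~(uintptr_t)1);
 *     int idx = ent & 1;
 *
 * where the chain then continues via next->jmp_list_next[idx].
 */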

extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}
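
/*
 * Illustrative sketch (not part of this header): lookups compare against
 * cflags masked with CF_HASH_MASK; the cluster ID (see CF_CLUSTER_MASK
 * above) is folded into the top bits.  A lookup might be built as:
 *
 *     uint32_t cf_mask = (curr_cflags() |
 *                         (cpu->cluster_index << CF_CLUSTER_SHIFT))
 *                        & CF_HASH_MASK;
 *     tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
 */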

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2
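
/*
 * Illustrative sketch (not part of this header): helpers capture GETPC() at
 * their entry point so that, on a fault, guest state can be restored from
 * the host return address.  "helper_foo" is hypothetical:
 *
 *     void helper_foo(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t ra = GETPC();
 *         ...
 *         cpu_loop_exit_restore(env_cpu(env), ra);
 *     }
 */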

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

/**
 * get_page_addr_code() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                target_ulong addr)
{
    return addr;
}

/**
 * get_page_addr_code_hostp() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: output for the host address (may be NULL)
 *
 * Returns @addr.
 *
 * If @hostp is non-NULL, sets *@hostp to the host address where @addr's
 * content is kept.
 */
static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
                                                      target_ulong addr,
                                                      void **hostp)
{
    if (hostp) {
        *hostp = g2h(addr);
    }
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/**
 * get_page_addr_code() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);
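
/*
 * Illustrative sketch (not part of this header): callers check for -1 to
 * decide whether the code can be cached; a translator can fall back to a
 * one-shot TB along these lines:
 *
 *     tb_page_addr_t phys_pc = get_page_addr_code(env, pc);
 *     if (phys_pc == -1) {
 *         cflags |= CF_NOCACHE | 1;
 *     }
 *
 * where CF_NOCACHE marks the TB to be freed after execution and the low
 * bits limit it to a single instruction (see CF_COUNT_MASK above).
 */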

/**
 * get_page_addr_code_hostp() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: output for the host address (may be NULL)
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

/* vl.c */
extern int singlestep;

#endif