qemu/include/exec/exec-all.h
/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#include "exec/tb-context.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#endif
#include "sysemu/cpu-timers.h"

/* Allow viewing translation results - the slowdown should be negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type.  */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif
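
/*
 * Example (assuming "tb" is a TranslationBlock pointer as defined later in
 * this header): TB_PAGE_ADDR_FMT pairs with printf-style logging, e.g.
 *
 *     qemu_log("code page at " TB_PAGE_ADDR_FMT "\n", tb->page_addr[0]);
 */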

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
               cpu adjustments. Required for maintaining the correct
               icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @searched_pc is not in translated code, no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}
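
/*
 * A minimal usage sketch (hypothetical target helper, not part of this
 * header): a long-running, interruptible instruction writes back its
 * partial state after each unit of work, then polls
 * cpu_loop_exit_requested() to decide whether to return to the main loop.
 * The helper and its per-unit hooks below are assumptions for illustration.
 *
 *     void helper_block_copy(CPUArchState *env)
 *     {
 *         CPUState *cs = env_cpu(env);
 *
 *         while (copy_units_remaining(env)) {
 *             copy_one_unit(env);                 // guest state now consistent
 *             if (cpu_loop_exit_requested(cs)) {
 *                 cpu_loop_exit_restore(cs, GETPC());
 *             }
 *         }
 *     }
 */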

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
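
/*
 * Registration sketch (hedged): a target that exposes a second, "secure"
 * view of memory might wire its address spaces up roughly like this, where
 * secure_mr is an assumed MemoryRegion set up elsewhere by the board/CPU
 * code:
 *
 *     cpu->num_ases = 2;                  // must be set before the first call
 *     cpu_address_space_init(cpu, 0, "cpu-memory", get_system_memory());
 *     cpu_address_space_init(cpu, 1, "cpu-secure-memory", secure_mr);
 */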
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus except that the source
 * vCPU's work is scheduled as safe work, meaning all flushes will be
 * complete once the source vCPU's safe work is complete. This will
 * depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus except that the source
 * vCPU's work is scheduled as safe work, meaning all flushes will be
 * complete once the source vCPU's safe work is complete. This will
 * depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);

/**
 * tlb_flush_page_bits_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but only the low @bits bits
 * of @addr are significant when matching TLB entries.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits);
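
/*
 * Usage sketch (hedged): a target whose virtual addresses carry ignored
 * tag bits in the top byte can restrict the match to the architected
 * address width; the 56 here is an assumed example value, not a
 * requirement of the API.
 *
 *     tlb_flush_page_bits_by_mmuidx(cs, addr, idxmap, 56);
 */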

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                            uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
    (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);

/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
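
/*
 * Call-site sketch (hedged): the target's tlb_fill hook typically walks
 * its page tables and, on success, installs the translation before
 * returning. The variables below (cs, vaddr, phys, prot, mmu_idx) stand
 * for the results of that walk and are assumptions for illustration.
 *
 *     MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             phys & TARGET_PAGE_MASK, attrs,
 *                             prot, mmu_idx, TARGET_PAGE_SIZE);
 */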
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 target_ulong addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
                                                          target_ulong addr,
                                                          uint16_t idxmap,
                                                          unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap, unsigned bits)
{
}
#endif
/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr.  Raise an exception if the
 * page does not satisfy @access_type.  Raise an exception if the
 * access (@addr, @size) hits a watchpoint.  For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}
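
/*
 * Typical use (hypothetical helper, hedged): probe the full destination
 * once so that any fault is raised before the helper starts modifying
 * guest state; helper_store_pair and its 16-byte access are assumptions
 * for illustration.
 *
 *     void helper_store_pair(CPUArchState *env, target_ulong addr)
 *     {
 *         int mmu_idx = cpu_mmu_index(env, false);
 *         void *host = probe_write(env, addr, 16, mmu_idx, GETPC());
 *
 *         if (host) {
 *             // RAM-backed page: direct host access is possible
 *         } else {
 *             // I/O or watchpointed page: use the cpu_st* accessors instead
 *         }
 *     }
 */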

/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
 * the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);
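
/*
 * Non-faulting probe sketch (hedged): an instruction that must test a
 * mapping without taking an exception (e.g. a first-fault load) can pass
 * @nonfault = true and inspect the returned flags. The surrounding
 * variables (env, addr, mmu_idx) are assumed to exist in the caller.
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
 *                                    true, &host, GETPC());
 *
 *     if (flags & TLB_INVALID_MASK) {
 *         // no valid translation; report the condition to the guest
 *     } else if (flags & (TLB_MMIO | TLB_WATCHPOINT)) {
 *         // not a plain RAM page: take the slow path
 *     } else {
 *         // host is non-NULL; direct access through it is fine
 *     }
 */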

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    const void *ptr;    /* pointer to the translated code */
    size_t size;
};
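
/*
 * For example, per the note above, the search data that follows the
 * generated code of a TranslationBlock "tb" can be located with:
 *
 *     const uint8_t *search_data = (const uint8_t *)tb->tc.ptr + tb->tc.size;
 */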

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_MEMI_ONLY   0x00010000 /* Only instrument memory ops */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    /*
     * Above fields used for comparing
     */

    /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
    uint16_t size;
    uint16_t icount;

    struct tb_tc tc;

    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two of such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};

/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return qatomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(CPUState *cpu)
{
    return cpu->tcg_cflags;
}
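
/*
 * Sketch (hedged): when a caller needs a one-off TB with extra compile
 * flags, the usual pattern is to start from curr_cflags() and OR in the
 * extra bits, e.g. limiting the block to a single guest instruction via
 * the CF_COUNT_MASK field:
 *
 *     uint32_t cflags = curr_cflags(cpu) | CF_LAST_IO | 1;
 */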

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cflags);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed-mode architectures (Arm Thumb, MIPS16) which set the low
   bit to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

/**
 * get_page_addr_code() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                target_ulong addr)
{
    return addr;
}

/**
 * get_page_addr_code_hostp() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: output for the host address, may be NULL
 *
 * Returns @addr.
 *
 * If @hostp is non-NULL, sets *@hostp to the host address where @addr's
 * content is kept.
 */
static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
                                                      target_ulong addr,
                                                      void **hostp)
{
    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/**
 * get_page_addr_code() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);

/**
 * get_page_addr_code_hostp() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

#endif