qemu/include/exec/exec-all.h
/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#endif

/* Allow translation results to be seen; the slowdown should be negligible,
   so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type.  */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If the searched_pc is not in translated code no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
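
/*
 * Illustrative usage sketch (not part of the QEMU API): a target helper
 * that needs precise guest state before raising an exception might
 * combine this with GETPC(); helper_check_foo(), foo_fault() and
 * SOME_TARGET_EXCP are hypothetical names.
 *
 *     void helper_check_foo(CPUArchState *env)
 *     {
 *         CPUState *cs = env_cpu(env);
 *
 *         if (foo_fault(env)) {
 *             cpu_restore_state(cs, GETPC(), true);
 *             cs->exception_index = SOME_TARGET_EXCP;
 *             cpu_loop_exit(cs);
 *         }
 *     }
 */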

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}
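
/*
 * Illustrative usage sketch (not part of the QEMU API): an interruptible,
 * element-by-element helper might poll this between iterations, after
 * writing its progress back to guest registers/memory; env->count and
 * copy_one_element() are hypothetical names.
 *
 *     while (env->count) {
 *         copy_one_element(env);
 *         if (cpu_loop_exit_requested(cs)) {
 *             cpu_loop_exit_restore(cs, GETPC());
 *         }
 *     }
 */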

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB for all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
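
/*
 * Illustrative usage sketch (not part of the QEMU API): @idxmap is a
 * bitmap, so a target wanting to flush a page for two of its MMU indexes
 * (MMU_KERNEL_IDX and MMU_USER_IDX are hypothetical target names) would
 * pass:
 *
 *     tlb_flush_page_by_mmuidx(cs, addr,
 *                              (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 */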
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);

/**
 * tlb_flush_page_bits_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but matching TLB entries using
 * only the low @bits of each virtual address.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits);
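
/*
 * Illustrative usage sketch (not part of the QEMU API): a target whose
 * translation regime ignores the top byte of the virtual address
 * (hypothetical here) could flush with only the low 56 bits treated as
 * significant:
 *
 *     tlb_flush_page_bits_by_mmuidx(cs, addr, idxmap, 56);
 */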

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                            uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
    (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);

/**
 * tlb_flush_range_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of the start of the range to be flushed
 * @len: length of range to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
 * comparing only the low @bits worth of each virtual page.
 */
void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits);
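
/*
 * Illustrative usage sketch (not part of the QEMU API): a guest
 * "invalidate N consecutive pages" operation (num_pages is hypothetical)
 * maps naturally onto a single range flush:
 *
 *     tlb_flush_range_by_mmuidx(cs, addr, num_pages * TARGET_PAGE_SIZE,
 *                               idxmap, TARGET_LONG_BITS);
 */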

/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                        target_ulong len, uint16_t idxmap,
                                        unsigned bits);
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits);

/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
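
/*
 * Illustrative usage sketch (not part of the QEMU API): the tail of a
 * target's tlb_fill hook after a successful walk might look like this,
 * where walk_page_table() and its outputs are hypothetical:
 *
 *     hwaddr paddr;
 *     int prot;
 *     MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
 *
 *     if (walk_page_table(env, vaddr, access_type, mmu_idx,
 *                         &paddr, &prot, &attrs)) {
 *         tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                                 paddr & TARGET_PAGE_MASK, attrs,
 *                                 prot, mmu_idx, TARGET_PAGE_SIZE);
 *         return true;
 *     }
 */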
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 target_ulong addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
                                                          target_ulong addr,
                                                          uint16_t idxmap,
                                                          unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                                             target_ulong len, uint16_t idxmap,
                                             unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
                                                      target_ulong addr,
                                                      target_ulong len,
                                                      uint16_t idxmap,
                                                      unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             target_ulong addr,
                                                             target_ulong len,
                                                             uint16_t idxmap,
                                                             unsigned bits)
{
}
#endif
/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr.  Raise an exception if the
 * page does not satisfy @access_type.  Raise an exception if the
 * access (@addr, @size) hits a watchpoint.  For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}
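
/*
 * Illustrative usage sketch (not part of the QEMU API): a helper that
 * stores a 16-byte quantity can probe the whole destination first so
 * that any fault or watchpoint is raised before partial state is
 * committed; helper_store_pair() is a hypothetical name.
 *
 *     void helper_store_pair(CPUArchState *env, target_ulong addr,
 *                            uint64_t lo, uint64_t hi)
 *     {
 *         uintptr_t ra = GETPC();
 *         int mmu_idx = cpu_mmu_index(env, false);
 *         void *host = probe_write(env, addr, 16, mmu_idx, ra);
 *
 *         if (host) {
 *             ... direct RAM stores through host ...
 *         } else {
 *             ... fall back to the cpu_st*_mmuidx_ra accessors ...
 *         }
 *     }
 */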

/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK bits
 * for the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);
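
/*
 * Illustrative usage sketch (not part of the QEMU API): a no-fault load
 * can probe first and inspect the returned flags instead of taking an
 * exception:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
 *                                    true, &host, retaddr);
 *
 *     if (flags & TLB_INVALID_MASK) {
 *         ... page not mapped: report the no-fault condition ...
 *     } else if (host && !(flags & (TLB_WATCHPOINT | TLB_MMIO))) {
 *         ... fast path: read directly through host ...
 *     } else {
 *         ... slow path via the cpu_ld*_mmuidx_ra accessors ...
 *     }
 */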

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    const void *ptr;    /* pointer to the translated code */
    size_t size;
};

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint32_t cflags;    /* compile flags */

/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
#define CF_COUNT_MASK    0x000001ff
#define CF_NO_GOTO_TB    0x00000200 /* Do not chain with goto_tb */
#define CF_NO_GOTO_PTR   0x00000400 /* Do not chain with goto_ptr */
#define CF_SINGLE_STEP   0x00000800 /* gdbstub single-step in effect */
#define CF_LAST_IO       0x00008000 /* Last insn may be an IO access.  */
#define CF_MEMI_ONLY     0x00010000 /* Only instrument memory ops */
#define CF_USE_ICOUNT    0x00020000
#define CF_INVALID       0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL      0x00080000 /* Generate code for a parallel context */
#define CF_NOIRQ         0x00100000 /* Generate an uninterruptible TB */
#define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    /*
     * Above fields used for comparing
     */

    /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
    uint16_t size;
    uint16_t icount;

    struct tb_tc tc;

    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};

/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return qatomic_read(&tb->cflags);
}
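
/*
 * Illustrative usage sketch (not part of the QEMU API): translators query
 * the compile flags through tb_cflags() to decide how to generate code,
 * e.g.:
 *
 *     if (tb_cflags(tb) & CF_USE_ICOUNT) {
 *         ... emit icount-safe code around I/O accesses ...
 *     }
 *     if (tb_cflags(tb) & CF_PARALLEL) {
 *         ... use atomic sequences suitable for parallel execution ...
 *     }
 */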

/* current cflags for hashing/comparison */
uint32_t curr_cflags(CPUState *cpu);

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cflags);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif
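
/*
 * Illustrative usage sketch (not part of the QEMU API): GETPC() must be
 * evaluated in the outermost helper called from generated code, and the
 * value passed down to anything that may need to unwind; helper_foo()
 * and do_foo() are hypothetical names.
 *
 *     uint64_t helper_foo(CPUArchState *env, target_ulong addr)
 *     {
 *         return do_foo(env, addr, GETPC());
 *     }
 */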

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes for the access
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

/**
 * get_page_addr_code() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                target_ulong addr)
{
    return addr;
}

/**
 * get_page_addr_code_hostp() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 *
 * If @hostp is non-NULL, sets *@hostp to the host address where @addr's
 * content is kept.
 */
static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
                                                      target_ulong addr,
                                                      void **hostp)
{
    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}

/**
 * adjust_signal_pc:
 * @pc: raw pc from the host signal ucontext_t.
 * @is_write: host memory operation was write, or read-modify-write.
 *
 * Alter @pc as required for unwinding.  Return the type of the
 * guest memory access -- host reads may be for guest execution.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr);

/**
 * cpu_loop_exit_sigsegv:
 * @cpu: the cpu context
 * @addr: the guest address of the fault
 * @access_type: access was read/write/execute
 * @maperr: true for invalid page, false for permission fault
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGSEGV, and jump to the main cpu loop.
 */
void QEMU_NORETURN cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                                         MMUAccessType access_type,
                                         bool maperr, uintptr_t ra);

/**
 * cpu_loop_exit_sigbus:
 * @cpu: the cpu context
 * @addr: the guest address of the alignment fault
 * @access_type: access was read/write/execute
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGBUS, and jump to the main cpu loop.
 */
void QEMU_NORETURN cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                                        MMUAccessType access_type,
                                        uintptr_t ra);

#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/**
 * get_page_addr_code() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);

/**
 * get_page_addr_code_hostp() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

#endif