qemu/include/exec/exec-all.h
/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "exec/tb-context.h"
#include "sysemu/cpus.h"

/* Allow seeing translation results - the slowdown should be negligible,
   so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type.  */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
               cpu adjustments. Required for maintaining the correct
               icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If the searched_pc is not in translated code no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
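
/*
 * Illustrative sketch (not part of this header): a target's fault path
 * typically passes the host return address of the offending memory access
 * so that the guest PC and flags are rewound to the faulting instruction
 * before an exception is delivered.  The helper and exception number below
 * are hypothetical:
 *
 *   static void hypothetical_raise_fault(CPUState *cs, uintptr_t retaddr)
 *   {
 *       cpu_restore_state(cs, retaddr, true);    // rewind to the faulting insn
 *       cs->exception_index = SOME_TARGET_EXCP;  // placeholder exception number
 *       cpu_loop_exit(cs);                       // longjmp back to the cpu loop
 *   }
 */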

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
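
/*
 * Illustrative sketch of how a target's realize hook might register two
 * address spaces (the names, index meanings and @secure_mr region are
 * hypothetical; get_system_memory() is the usual system memory root):
 *
 *   cs->num_ases = 2;
 *   cpu_address_space_init(cs, 0, "cpu-memory", get_system_memory());
 *   cpu_address_space_init(cs, 1, "cpu-secure-memory", secure_mr);
 */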
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes,
 * like tlb_flush_page_all_cpus except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete.  This will depend on when
 * the execution of the current TB ends.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete.  This will depend on when
 * the execution of the current TB ends.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
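/*
 * Illustrative sketch (the MMU index names are hypothetical): @idxmap is a
 * bitmap built from shifted index numbers, so flushing one page for two
 * specific indexes looks like:
 *
 *   uint16_t idxmap = (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX);
 *   tlb_flush_page_by_mmuidx(cs, vaddr, idxmap);
 */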
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus except that the source
 * vCPU's work is scheduled as safe work, meaning all flushes will be
 * complete once the source vCPU's safe work is complete. This will
 * depend on when the execution of the current TB ends.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus except that the source
 * vCPU's work is scheduled as safe work, meaning all flushes will be
 * complete once the source vCPU's safe work is complete. This will
 * depend on when the execution of the current TB ends.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
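/*
 * Illustrative sketch of the tail of a target's tlb_fill() after a
 * successful page table walk (variable names are hypothetical):
 *
 *   tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                           paddr & TARGET_PAGE_MASK,
 *                           MEMTXATTRS_UNSPECIFIED,
 *                           prot, mmu_idx, TARGET_PAGE_SIZE);
 */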
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};
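
/*
 * Illustrative sketch of the tagged-pointer encoding described above
 * (the helper is hypothetical, not part of this header):
 *
 *   static inline TranslationBlock *jmp_entry_tb(uintptr_t e)
 *   {
 *       return (TranslationBlock *)(e & ~(uintptr_t)1);  // strip the LSB tag
 *   }
 *
 * (e & 1) then selects which of the pointed TB's two list slots to follow.
 */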

extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}
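
/*
 * Simplified sketch of how the execution loop combines these: a TB is looked
 * up (or generated) with the cflags that are current right now, so that e.g.
 * parallel and icount variants of the same guest code are kept apart.  The
 * exact flow lives in the cpu-exec/translation code, not in this header:
 *
 *   uint32_t cf_mask = curr_cflags() & CF_HASH_MASK;
 *   TranslationBlock *tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
 *   if (tb == NULL) {
 *       tb = tb_gen_code(cpu, pc, cs_base, flags, curr_cflags());
 *   }
 */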

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2
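
/*
 * Illustrative sketch (the helper and check are hypothetical): out-of-line
 * helpers called from generated code capture GETPC() on entry so that guest
 * state can be restored relative to the calling TB if they need to exit:
 *
 *   void helper_hypothetical_check(CPUState *cs, target_ulong addr)
 *   {
 *       uintptr_t ra = GETPC();            // host return address into the TB
 *       if (access_is_bad(cs, addr)) {     // placeholder check
 *           cpu_loop_exit_restore(cs, ra); // restore guest state, then exit
 *       }
 *   }
 */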

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
#endif

/* vl.c */
extern int singlestep;

#endif