qemu/include/exec/exec-all.h
/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "qemu-common.h"
#include "exec/tb-context.h"

/* Allow seeing translation results - the slowdown should be negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type.  */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
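
/* Illustrative sketch only (dc, block_full() and disas_one_insn() are
 * hypothetical, not part of this header): a target translator's main loop
 * typically keeps decoding while is_jmp stays at DISAS_NEXT and stops as
 * soon as an instruction changes control flow or CPU state:
 *
 *     while (dc->is_jmp == DISAS_NEXT && !block_full(dc)) {
 *         dc->is_jmp = disas_one_insn(dc);
 *     }
 *     // DISAS_TB_JUMP: target known statically, a goto_tb chain may be used
 *     // DISAS_JUMP/DISAS_UPDATE: look up the next TB at run time instead
 */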

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @as: address space to add
 * @asidx: integer index of this address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc. have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
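
/* Illustrative sketch, assuming target realize code with two address
 * spaces (the AddressSpace variables below are hypothetical):
 *
 *     cpu->num_ases = 2;                          // must be set beforehand
 *     cpu_address_space_init(cpu, as_memory, 0);  // index 0 backs cpu->as
 *     cpu_address_space_init(cpu, as_secure, 1);  // meaning of 1 is target-defined
 */
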
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 * @flush_global: ignored
 *
 * Flush the entire TLB for the specified CPU.
 * The flush_global flag is in theory an indicator of whether the whole
 * TLB should be flushed, or only those entries not marked global.
 * In practice QEMU does not implement any global/not global flag for
 * TLB entries, and the argument is ignored.
 */
void tlb_flush(CPUState *cpu, int flush_global);
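
/* Illustrative sketch (the helper name and the env->pt_base field are
 * hypothetical): target code typically flushes after modifying state that
 * invalidates cached virtual-to-physical mappings:
 *
 *     void helper_write_pt_base(CPUArchState *env, target_ulong val)
 *     {
 *         env->pt_base = val;
 *         tlb_flush(ENV_GET_CPU(env), 1);   // flush_global value is ignored
 *     }
 *
 * ENV_GET_CPU() here stands for the target's env-to-CPUState conversion.
 */
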
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @...: list of MMU indexes to flush, terminated by a negative value
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @...: list of MMU indexes to flush, terminated by a negative value
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, ...);
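
/* Illustrative sketch: the _by_mmuidx variants take a variadic list of MMU
 * index values terminated by a negative number, e.g. flushing one page for
 * (arbitrary) indexes 0 and 1 only, then every entry for index 2:
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr, 0, 1, -1);
 *     tlb_flush_by_mmuidx(cpu, 2, -1);
 */
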
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
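
/* Illustrative sketch of the flow described above, with a hypothetical page
 * table walker (walk_page_table(), PTWalkResult and the fault handling are
 * illustrative, not existing QEMU code):
 *
 *     void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType type,
 *                   int mmu_idx, uintptr_t retaddr)
 *     {
 *         PTWalkResult res;
 *         if (!walk_page_table(cs, addr, type, mmu_idx, &res)) {
 *             // set up the guest fault, then exit back to the guest
 *             cpu_loop_exit_restore(cs, retaddr);
 *         }
 *         tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK, res.paddr,
 *                                 res.attrs, res.prot, mmu_idx,
 *                                 TARGET_PAGE_SIZE);
 *     }
 */
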
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}

static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}

static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, ...)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(__s390x__) || defined(__mips__) \
    || defined(CONFIG_TCG_INTERPRETER)
/* NOTE: Direct jump patching must be atomic to be thread-safe. */
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x10000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x20000
#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */

    uint16_t invalid;

    void *tc_ptr;    /* pointer to the translated code */
    uint8_t *tc_search;  /* pointer to search data */
    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
#ifdef USE_DIRECT_JUMP
    uint16_t jmp_insn_offset[2]; /* offset of native jump instruction */
#else
    uintptr_t jmp_target_addr[2]; /* target address for indirect jump */
#endif
    /* Each TB has an associated circular list of TBs jumping to this one.
     * jmp_list_first points to the first TB jumping to this one.
     * jmp_list_next is used to point to the next TB in a list.
     * Since each TB can have two jumps, it can participate in two lists.
     * jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
     * TranslationBlock structure, but the two least significant bits of
     * them are used to encode which data field of the pointed TB should
     * be used to traverse the list further from that TB:
     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
     * In other words, 0/1 tells which jump is used in the pointed TB,
     * and 2 means that this is a pointer back to the target TB of this list.
     */
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_list_first;
};
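
/* Illustrative sketch of decoding the tagged jmp_list pointers described
 * above (reader-side only; not an existing helper in this header):
 *
 *     uintptr_t e = tb->jmp_list_first;
 *     TranslationBlock *ptb = (TranslationBlock *)(e & ~(uintptr_t)3);
 *     int tag = e & 3;  // 0/1: continue via ptb->jmp_list_next[tag];
 *                       // 2:   'e' points back at the list's target TB
 */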

void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(__s390x__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    intptr_t disp = addr - (jmp_addr - 2);
    atomic_set((int32_t *)jmp_addr, disp / 2);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
void arm_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 arm_tb_set_jmp_target
#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->jmp_insn_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->jmp_target_addr[n] = addr;
}

#endif

/* Called with tb_lock held.  */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    if (tb->jmp_list_next[n]) {
        /* Another thread has already done this while we were
         * outside of the lock; nothing to do in this case */
        return;
    }
    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc_ptr, tb->pc, n,
                           tb_next->tc_ptr, tb_next->pc);

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

    /* add in TB jmp circular list */
    tb->jmp_list_next[n] = tb_next->jmp_list_first;
    tb_next->jmp_list_first = (uintptr_t)tb | n;
}

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2
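
/* Illustrative sketch (helper name and fault check are hypothetical): a
 * softmmu helper captures its return address with GETPC() so that, if it
 * has to leave the TB, guest state can be restored from the translation
 * that called it:
 *
 *     void helper_access_check(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t ra = GETPC();
 *         if (access_would_fault(env, addr)) {
 *             cpu_loop_exit_restore(ENV_GET_CPU(env), ra);
 *         }
 *     }
 *
 * ENV_GET_CPU() again stands for the target's env-to-CPUState conversion.
 */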

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index, MemTxAttrs attrs);

void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen, int *prot,
                                  MemTxAttrs *attr);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif

/* vl.c */
extern int singlestep;

/* cpu-exec.c, accessed with atomic_mb_read/atomic_mb_set */
extern CPUState *tcg_current_cpu;
extern bool exit_request;

#endif