qemu/include/exec/exec-all.h
/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* Allow seeing translation results - the slowdown should be negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type.  */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
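/* As a worked example of the formula above: with MAX_OPC_PARAM_IARGS = 5 and
 * MAX_OPC_PARAM_OARGS = 1, MAX_OPC_PARAM_ARGS is 6, so MAX_OPC_PARAM comes to
 * 4 + 2 * 6 = 16 on a 32-bit host and 4 + 1 * 6 = 10 on a 64-bit host.  */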
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);

void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUState *cpu, Error **errp);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 * @flush_global: ignored
 *
 * Flush the entire TLB for the specified CPU.
 * The flush_global flag is in theory an indicator of whether the whole
 * TLB should be flushed, or only those entries not marked global.
 * In practice QEMU does not implement any global/not global flag for
 * TLB entries, and the argument is ignored.
 */
void tlb_flush(CPUState *cpu, int flush_global);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @...: list of MMU indexes to flush, terminated by a negative value
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @...: list of MMU indexes to flush, terminated by a negative value
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, ...);
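/*
 * Minimal usage sketch for the two variadic flushes above; the MMU index
 * values 0 and 1 are placeholders, real values are target-specific:
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr, 0, 1, -1);   // one page, indexes 0 and 1
 *     tlb_flush_by_mmuidx(cpu, 0, -1);                 // every entry in index 0
 *
 * The trailing -1 is the negative terminator required by both calls.
 */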
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}

static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}

static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, ...)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(__s390x__) || defined(__mips__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x10000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x20000
#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */

    void *tc_ptr;    /* pointer to the translated code */
    uint8_t *tc_search;  /* pointer to search data */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    uintptr_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
};
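/*
 * A sketch of how the tagged jmp_first/jmp_next pointers above can be decoded
 * when walking the list (the variable names here are illustrative only):
 *
 *     TranslationBlock *ptb = tb->jmp_first;
 *     unsigned n = (uintptr_t)ptb & 3;    // 0 = jmp_next[0], 1 = jmp_next[1], 2 = jmp_first
 *     ptb = (TranslationBlock *)((uintptr_t)ptb & ~3);
 *
 * The same low-bit trick is used for page_next[], with a single bit.
 */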

#include "qemu/thread.h"

typedef struct TBContext TBContext;

struct TBContext {

    TranslationBlock *tbs;
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    QemuMutex tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    int tb_invalidated_flag;
};

void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
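/* Worked example for the x86 case above (addresses are made up): the 32-bit
 * value written at jmp_addr is a rel32 displacement, which the CPU applies
 * relative to the end of that 4-byte field, hence the "+ 4".  For a
 * displacement field at jmp_addr = 0x1000 and a target addr = 0x2000, the
 * stored value is 0x2000 - (0x1000 + 4) = 0xffc.  */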
#elif defined(__s390x__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    intptr_t disp = addr - (jmp_addr - 2);
    stl_be_p((void*)jmp_addr, disp / 2);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}
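/*
 * Typical use, as a sketch: a caller that knows the previous TB and which of
 * its two exit slots was taken (n, 0 or 1) chains that exit straight into the
 * code of the next TB:
 *
 *     tb_add_jump(last_tb, n, next_tb);
 *
 * last_tb, n and next_tb are illustrative names, not part of this header.
 */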

/* GETRA is the true target of the return instruction that we'll execute,
   defined here for simplicity of defining the follow-up macros.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETRA() tci_tb_ptr
#else
# define GETRA() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2

#define GETPC()  (GETRA() - GETPC_ADJ)
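/*
 * GETPC() is meant to be evaluated in a helper called directly from generated
 * code, so the resulting address identifies the TCG-emitted call site.  A
 * sketch, assuming a hypothetical helper_foo() and the target's usual
 * env-to-CPUState conversion:
 *
 *     void helper_foo(CPUArchState *env)
 *     {
 *         uintptr_t ra = GETPC();   // must be taken in the outermost helper
 *         ...
 *         cpu_restore_state(ENV_GET_CPU(env), ra);   // e.g. before raising an exception
 *     }
 */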

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index);

void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif

/* vl.c */
extern int singlestep;

/* cpu-exec.c, accessed with atomic_mb_read/atomic_mb_set */
extern CPUState *tcg_current_cpu;
extern bool exit_request;

#endif