/*
 * common defines for all CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
  19#ifndef CPU_DEFS_H
  20#define CPU_DEFS_H
  21
  22#ifndef NEED_CPU_H
  23#error cpu.h included from common code
  24#endif
  25
  26#include "config.h"
  27#include <setjmp.h>
  28#include <inttypes.h>
  29#include <signal.h>
  30#include "osdep.h"
  31#include "qemu-queue.h"
  32#include "targphys.h"
  33
  34#ifndef TARGET_LONG_BITS
  35#error TARGET_LONG_BITS must be defined before including this header
  36#endif
  37
  38#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
  39
  40typedef int16_t target_short __attribute__ ((aligned(TARGET_SHORT_ALIGNMENT)));
  41typedef uint16_t target_ushort __attribute__((aligned(TARGET_SHORT_ALIGNMENT)));
  42typedef int32_t target_int __attribute__((aligned(TARGET_INT_ALIGNMENT)));
  43typedef uint32_t target_uint __attribute__((aligned(TARGET_INT_ALIGNMENT)));
  44typedef int64_t target_llong __attribute__((aligned(TARGET_LLONG_ALIGNMENT)));
  45typedef uint64_t target_ullong __attribute__((aligned(TARGET_LLONG_ALIGNMENT)));
  46/* target_ulong is the type of a virtual address */
  47#if TARGET_LONG_SIZE == 4
  48typedef int32_t target_long __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
  49typedef uint32_t target_ulong __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
  50#define TARGET_FMT_lx "%08x"
  51#define TARGET_FMT_ld "%d"
  52#define TARGET_FMT_lu "%u"
  53#elif TARGET_LONG_SIZE == 8
  54typedef int64_t target_long __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
  55typedef uint64_t target_ulong __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
  56#define TARGET_FMT_lx "%016" PRIx64
  57#define TARGET_FMT_ld "%" PRId64
  58#define TARGET_FMT_lu "%" PRIu64
  59#else
  60#error TARGET_LONG_SIZE undefined
  61#endif
  62
  63#define EXCP_INTERRUPT  0x10000 /* async interruption */
  64#define EXCP_HLT        0x10001 /* hlt instruction reached */
  65#define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
  66#define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */
  67
  68#define TB_JMP_CACHE_BITS 12
  69#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
  70
  71/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
  72   addresses on the same page.  The top bits are the same.  This allows
  73   TLB invalidation to quickly clear a subset of the hash table.  */
  74#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
  75#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
  76#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
  77#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
  78
  79#if !defined(CONFIG_USER_ONLY)
  80#define CPU_TLB_BITS 8
  81#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
  82
  83#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
  84#define CPU_TLB_ENTRY_BITS 4
  85#else
  86#define CPU_TLB_ENTRY_BITS 5
  87#endif
  88
  89typedef struct CPUTLBEntry {
  90    /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
  91       bit TARGET_PAGE_BITS-1..4  : Nonzero for accesses that should not
  92                                    go directly to ram.
  93       bit 3                      : indicates that the entry is invalid
  94       bit 2..0                   : zero
  95    */
  96    target_ulong addr_read;
  97    target_ulong addr_write;
  98    target_ulong addr_code;
  99    /* Addend to virtual address to get host address.  IO accesses
 100       use the corresponding iotlb value.  */
 101    uintptr_t addend;
 102    /* padding to get a power of two size */
 103    uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
 104                  (sizeof(target_ulong) * 3 +
 105                   ((-sizeof(target_ulong) * 3) & (sizeof(uintptr_t) - 1)) +
 106                   sizeof(uintptr_t))];
 107} CPUTLBEntry;
 108
 109extern int CPUTLBEntry_wrong_size[sizeof(CPUTLBEntry) == (1 << CPU_TLB_ENTRY_BITS) ? 1 : -1];
 110
 111#define CPU_COMMON_TLB \
 112    /* The meaning of the MMU modes is defined in the target code. */   \
 113    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
 114    target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE];               \
 115    target_ulong tlb_flush_addr;                                        \
 116    target_ulong tlb_flush_mask;
 117
 118#else
 119
 120#define CPU_COMMON_TLB
 121
 122#endif
 123
 124
 125#ifdef HOST_WORDS_BIGENDIAN
 126typedef struct icount_decr_u16 {
 127    uint16_t high;
 128    uint16_t low;
 129} icount_decr_u16;
 130#else
 131typedef struct icount_decr_u16 {
 132    uint16_t low;
 133    uint16_t high;
 134} icount_decr_u16;
 135#endif
 136
 137struct kvm_run;
 138struct KVMState;
 139struct qemu_work_item;
 140
 141typedef struct CPUBreakpoint {
 142    target_ulong pc;
 143    int flags; /* BP_* */
 144    QTAILQ_ENTRY(CPUBreakpoint) entry;
 145} CPUBreakpoint;
 146
 147typedef struct CPUWatchpoint {
 148    target_ulong vaddr;
 149    target_ulong len_mask;
 150    int flags; /* BP_* */
 151    QTAILQ_ENTRY(CPUWatchpoint) entry;
 152} CPUWatchpoint;
 153
 154#define CPU_TEMP_BUF_NLONGS 128
 155#define CPU_COMMON                                                      \
 156    struct TranslationBlock *current_tb; /* currently executing TB  */  \
 157    /* soft mmu support */                                              \
 158    /* in order to avoid passing too many arguments to the MMIO         \
 159       helpers, we store some rarely used information in the CPU        \
 160       context) */                                                      \
 161    uintptr_t mem_io_pc; /* host pc at which the memory was             \
 162                            accessed */                                 \
 163    target_ulong mem_io_vaddr; /* target virtual addr at which the      \
 164                                     memory was accessed */             \
 165    uint32_t halted; /* Nonzero if the CPU is in suspend state */       \
 166    uint32_t interrupt_request;                                         \
 167    volatile sig_atomic_t exit_request;                                 \
 168    CPU_COMMON_TLB                                                      \
 169    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];           \
 170    /* buffer for temporaries in the code generator */                  \
 171    long temp_buf[CPU_TEMP_BUF_NLONGS];                                 \
 172                                                                        \
 173    int64_t icount_extra; /* Instructions until next timer event.  */   \
 174    /* Number of cycles left, with interrupt flag in high bit.          \
 175       This allows a single read-compare-cbranch-write sequence to test \
 176       for both decrementer underflow and exceptions.  */               \
 177    union {                                                             \
 178        uint32_t u32;                                                   \
 179        icount_decr_u16 u16;                                            \
 180    } icount_decr;                                                      \
 181    uint32_t can_do_io; /* nonzero if memory mapped IO is safe.  */     \
 182                                                                        \
 183    /* from this point: preserved by CPU reset */                       \
 184    /* ice debug support */                                             \
 185    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;            \
 186    int singlestep_enabled;                                             \
 187                                                                        \
 188    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;            \
 189    CPUWatchpoint *watchpoint_hit;                                      \
 190                                                                        \
 191    struct GDBRegisterState *gdb_regs;                                  \
 192                                                                        \
 193    /* Core interrupt code */                                           \
 194    jmp_buf jmp_env;                                                    \
 195    int exception_index;                                                \
 196                                                                        \
 197    CPUArchState *next_cpu; /* next CPU sharing TB cache */                 \
 198    int cpu_index; /* CPU index (informative) */                        \
 199    uint32_t host_tid; /* host thread ID */                             \
 200    int numa_node; /* NUMA node this cpu is belonging to  */            \
 201    int nr_cores;  /* number of cores within this CPU package */        \
 202    int nr_threads;/* number of threads within this CPU */              \
 203    int running; /* Nonzero if cpu is currently running(usermode).  */  \
 204    int thread_id;                                                      \
 205    /* user data */                                                     \
 206    void *opaque;                                                       \
 207                                                                        \
 208    uint32_t created;                                                   \
 209    uint32_t stop;   /* Stop request */                                 \
 210    uint32_t stopped; /* Artificially stopped */                        \
 211    struct QemuCond *halt_cond;                                         \
 212    struct qemu_work_item *queued_work_first, *queued_work_last;        \
 213    const char *cpu_model_str;                                          \
 214    struct KVMState *kvm_state;                                         \
 215    struct kvm_run *kvm_run;                                            \
 216    int kvm_fd;                                                         \
 217    int kvm_vcpu_dirty;
 218
 219#endif
 220