/*
 * common defines for all CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_DEFS_H
#define CPU_DEFS_H

#ifndef NEED_CPU_H
#error cpu.h included from common code
#endif

#include "config.h"
#include <setjmp.h>
#include <inttypes.h>
#include <signal.h>
#include "osdep.h"
#include "sys-queue.h"
#include "targphys.h"

#ifndef TARGET_LONG_BITS
#error TARGET_LONG_BITS must be defined before including this header
#endif

#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)

/* target_ulong is the type of a virtual address */
#if TARGET_LONG_SIZE == 4
typedef int32_t target_long;
typedef uint32_t target_ulong;
#define TARGET_FMT_lx "%08x"
#define TARGET_FMT_ld "%d"
#define TARGET_FMT_lu "%u"
#elif TARGET_LONG_SIZE == 8
typedef int64_t target_long;
typedef uint64_t target_ulong;
#define TARGET_FMT_lx "%016" PRIx64
#define TARGET_FMT_ld "%" PRId64
#define TARGET_FMT_lu "%" PRIu64
#else
#error TARGET_LONG_SIZE undefined
#endif

#define HOST_LONG_SIZE (HOST_LONG_BITS / 8)

#define EXCP_INTERRUPT 0x10000 /* async interruption */
#define EXCP_HLT       0x10001 /* hlt instruction reached */
#define EXCP_DEBUG     0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED    0x10003 /* cpu is halted (waiting for external event) */

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
   addresses on the same page.  The top bits are the same.  This allows
   TLB invalidation to quickly clear a subset of the hash table.  */
#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
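
/* Illustration only, not part of this header: the jump cache is indexed by a
   hash of the guest PC in which the low TB_JMP_PAGE_BITS depend only on the
   in-page offset and the upper bits depend on the page, so all entries for
   one guest page share the same upper index bits and page invalidation only
   has to clear TB_JMP_PAGE_SIZE consecutive slots.  In QEMU of this era the
   real helper, tb_jmp_cache_hash_func(), lives in exec-all.h; a sketch,
   assuming TARGET_PAGE_BITS is already defined, looks roughly like:

       static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
       {
           target_ulong tmp;
           tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
           return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
                   | (tmp & TB_JMP_ADDR_MASK));
       }
*/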

#define CPU_TLB_BITS 8
#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)

#if TARGET_PHYS_ADDR_BITS == 32 && TARGET_LONG_BITS == 32
#define CPU_TLB_ENTRY_BITS 4
#else
#define CPU_TLB_ENTRY_BITS 5
#endif

typedef struct CPUTLBEntry {
    /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
       bit TARGET_PAGE_BITS-1..4                : Nonzero for accesses that
                                                  should not go directly to ram.
       bit 3                                    : indicates that the entry is invalid
       bit 2..0                                 : zero
     */
    target_ulong addr_read;
    target_ulong addr_write;
    target_ulong addr_code;
    /* Addend to virtual address to get physical address.  IO accesses
       use the corresponding iotlb value.  */
#if TARGET_PHYS_ADDR_BITS == 64
    /* on i386 Linux make sure it is aligned */
    target_phys_addr_t addend __attribute__((aligned(8)));
#else
    target_phys_addr_t addend;
#endif
    /* padding to get a power of two size */
    uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
                  (sizeof(target_ulong) * 3 +
                   ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
                   sizeof(target_phys_addr_t))];
} CPUTLBEntry;

#ifdef WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif

struct kvm_run;
struct KVMState;

typedef struct CPUBreakpoint {
    target_ulong pc;
    int flags; /* BP_* */
    TAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;

typedef struct CPUWatchpoint {
    target_ulong vaddr;
    target_ulong len_mask;
    int flags; /* BP_* */
    TAILQ_ENTRY(CPUWatchpoint) entry;
} CPUWatchpoint;

#define CPU_TEMP_BUF_NLONGS 128
#define CPU_COMMON \
    struct TranslationBlock *current_tb; /* currently executing TB */ \
    /* soft mmu support */ \
    /* in order to avoid passing too many arguments to the MMIO \
       helpers, we store some rarely used information in the CPU \
       context */ \
    unsigned long mem_io_pc; /* host pc at which the memory was \
                                accessed */ \
    target_ulong mem_io_vaddr; /* target virtual addr at which the \
                                  memory was accessed */ \
    uint32_t halted; /* Nonzero if the CPU is in suspend state */ \
    uint32_t stop; /* Stop request */ \
    uint32_t stopped; /* Artificially stopped */ \
    uint32_t interrupt_request; \
    volatile sig_atomic_t exit_request; \
    /* The meaning of the MMU modes is defined in the target code. */ \
    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
    target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
    /* buffer for temporaries in the code generator */ \
    long temp_buf[CPU_TEMP_BUF_NLONGS]; \
    \
    int64_t icount_extra; /* Instructions until next timer event. */ \
    /* Number of cycles left, with interrupt flag in high bit. \
       This allows a single read-compare-cbranch-write sequence to test \
       for both decrementer underflow and exceptions. */ \
    union { \
        uint32_t u32; \
        icount_decr_u16 u16; \
    } icount_decr; \
    uint32_t can_do_io; /* nonzero if memory mapped IO is safe. */ \
    \
    /* from this point: preserved by CPU reset */ \
    /* ice debug support */ \
    TAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; \
    int singlestep_enabled; \
    \
    TAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; \
    CPUWatchpoint *watchpoint_hit; \
    \
    struct GDBRegisterState *gdb_regs; \
    \
    /* Core interrupt code */ \
    jmp_buf jmp_env; \
    int exception_index; \
    \
    CPUState *next_cpu; /* next CPU sharing TB cache */ \
    int cpu_index; /* CPU index (informative) */ \
    uint32_t host_tid; /* host thread ID */ \
    int numa_node; /* NUMA node this CPU belongs to */ \
    int running; /* Nonzero if the CPU is currently running (user mode). */ \
    /* user data */ \
    void *opaque; \
    \
    uint32_t created; \
    struct QemuThread *thread; \
    struct QemuCond *halt_cond; \
    const char *cpu_model_str; \
    struct KVMState *kvm_state; \
    struct kvm_run *kvm_run; \
    int kvm_fd;

#endif
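
/* Illustration only, not part of this header: CPU_COMMON is meant to be
   expanded inside each target's CPU state structure (defined in that
   target's cpu.h), and common code typically reaches the shared fields
   through the target's CPUState typedef.  A hypothetical target might use
   it roughly like this (all names below are made up):

       typedef struct CPUMyTargetState {
           target_ulong regs[16];     // target-specific registers
           target_ulong pc;

           CPU_COMMON                 // fields shared with common code,
                                      // as declared above

           int my_target_feature;     // further target-specific state
       } CPUMyTargetState;
*/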