#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* Allow dumping of translation results; the slowdown should be negligible. */
#define DEBUG_DISAS

/* Page tracking code uses RAM addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an
   appropriate type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values used by the target translators */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;
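
/*
 * Illustrative sketch (not part of this header; names like disas_insn and
 * dc->is_jmp are assumed): a target translator typically decodes guest
 * instructions until its DisasContext reports one of the DISAS_* conditions
 * above, e.g.
 *
 *     do {
 *         disas_insn(dc);
 *     } while (dc->is_jmp == DISAS_NEXT && num_insns < max_insns);
 */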

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 208

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument.  */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
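
/*
 * Worked example of the sizing above: MAX_OPC_PARAM_ARGS is 5 + 1 = 6, so
 * MAX_OPC_PARAM is 4 + 1 * 6 = 10 on a 64-bit host (4 + 2 * 6 = 16 on a
 * 32-bit host), which gives OPPARAM_BUF_SIZE = 640 * 10 = 6400 (or
 * 640 * 16 = 10240) opcode-parameter slots per translation buffer.
 */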

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          int pc_pos);

void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc);

void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access);
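
/*
 * Illustrative sketch (assumed caller, cf. cpu-exec.c; tb_find is a
 * hypothetical lookup helper): the usual flow is to look up an existing TB
 * for (pc, cs_base, flags), translate a new one only on a miss, and then
 * run its generated code:
 *
 *     tb = tb_find(env, pc, cs_base, flags);
 *     if (tb == NULL) {
 *         tb = tb_gen_code(env, pc, cs_base, flags, 0);
 *     }
 *     next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
 */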
#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_flush_page(CPUArchState *env, target_ulong addr);
void tlb_flush(CPUArchState *env, int flush_global);
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(hwaddr addr);
#else
static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
}

static inline void tlb_flush(CPUArchState *env, int flush_global)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be at least the size of a host
                                       icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

/* Estimated block size for TB allocation.  */
/* XXX: use a per-code average fragment size and modulate it according to
   the host CPU.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;        /* simulated PC corresponding to this block */
    target_ulong cs_base;   /* CS base for this block */
    uint64_t flags;         /* flags defining in which context the code
                               was generated */
    uint16_t size;          /* size of target code for this block
                               (1 <= size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;        /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    uint8_t *tc_ptr;        /* pointer to the translated code */

    /* next matching tb for physical address */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code.  The lower bit
       of the pointer tells the index in page_next[]. */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* The following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2];  /* offset of jump instruction */
#else
    uintptr_t tb_next[2];       /* address of jump generated code */
#endif
    /* List of TBs jumping to this one.  This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1],
       2 = jmp_first. */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};
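
/*
 * Sketch (not in this header; cf. the per-target translate.c loops): the low
 * bits of cflags carry the instruction budget used by icount mode, and
 * CF_LAST_IO marks that the final instruction may perform I/O:
 *
 *     int max_insns = tb->cflags & CF_COUNT_MASK;
 *     if (max_insns == 0) {
 *         max_insns = CF_COUNT_MASK;
 *     }
 *     ...
 *     if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
 *         gen_io_start();   // from gen-icount.h
 *     }
 */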

#include "exec/spinlock.h"

typedef struct TBContext TBContext;

struct TBContext {

    TranslationBlock *tbs;
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    spinlock_t tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    int tb_invalidated_flag;
};

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
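
/*
 * Sketch of the two lookups these hash functions serve (assumed field names,
 * cf. cpu-exec.c):
 *
 *     // fast path: direct-mapped per-CPU cache indexed by the virtual pc
 *     tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
 *
 *     // slow path: global hash table over physical addresses,
 *     // chained through tb->phys_hash_next
 *     h = tb_phys_hash_func(phys_pc);
 *     for (tb = tcg_ctx.tb_ctx.tb_phys_hash[h]; tb; tb = tb->phys_hash_next) {
 *         if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags) {
 *             break;
 *         }
 *     }
 */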

void tb_free(TranslationBlock *tb);
void tb_flush(CPUArchState *env);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* patch the 24-bit displacement of the branch instruction */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}
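
/*
 * Sketch (not in this header): a walker over the circular jump list decodes
 * the slot index from the two low bits; the list terminates at the TB itself
 * tagged with 2 (cf. tb_phys_invalidate() in translate-all.c):
 *
 *     TranslationBlock *p = tb->jmp_first;
 *     while (p != NULL) {
 *         int n = (uintptr_t)p & 3;
 *         if (n == 2) {
 *             break;              // reached tb itself: end of list
 *         }
 *         p = (TranslationBlock *)((uintptr_t)p & ~3);
 *         p = p->jmp_next[n];     // follow slot n of the pointing TB
 *     }
 */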

/* The return address may point to the start of the next instruction.
   Subtracting one gets us the call instruction itself.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#elif defined(__s390__) && !defined(__s390x__)
# define GETPC() \
    (((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1)
#elif defined(__arm__)
/* Thumb return addresses have the low bit set, so we need to subtract two.
   This is still safe in ARM mode because instructions are 4 bytes.  */
# define GETPC() ((uintptr_t)__builtin_return_address(0) - 2)
#else
# define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)
#endif

#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* The qemu_ld/st optimization splits code generation into a fast and a slow
   path, so an MMU helper called from the slow path needs a way to recover
   the fast path's pc without any additional argument.  The trick is to
   embed the fast path pc into the slow path.

   Code flow in the slow path:
   (1) pre-process
   (2) call MMU helper
   (3) jump to (5)
   (4) fast path information (implementation specific)
   (5) post-process (e.g. stack adjust)
   (6) jump to the code following the fast path
 */
# if defined(__i386__) || defined(__x86_64__)
/* A long jmp is used to embed the fast path pc, so that its destination is
   the code following the fast path, though this jmp is never executed.

   call MMU helper
   jmp POST_PROC (2 bytes)    <- GETRA()
   jmp NEXT_CODE (5 bytes)
   POST_PROCESS ...           <- GETRA() + 7
 */
# define GETRA() ((uintptr_t)__builtin_return_address(0))
# define GETPC_LDST() ((uintptr_t)(GETRA() + 7 + \
                                   *(int32_t *)((void *)GETRA() + 3) - 1))
# elif defined(_ARCH_PPC) && !defined(_ARCH_PPC64)
/* The fast path pc is stored as a literal in the word preceding the
   return address.  */
# define GETRA() ((uintptr_t)__builtin_return_address(0))
# define GETPC_LDST() ((uintptr_t)((*(int32_t *)(GETRA() - 4)) - 1))
# elif defined(__arm__)
/* Two insns sit between the return address and the branch back to
   straight-line code; find and decode that branch to recover the fast
   path pc.  */
# define GETRA() ((uintptr_t)__builtin_return_address(0))
# define GETPC_LDST() tcg_getpc_ldst(GETRA())
static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
{
    int32_t b;
    ra += 8;                    /* skip the two intervening insns */
    b = *(int32_t *)ra;         /* load the branch insn */
    b = (b << 8) >> (8 - 2);    /* extract and scale the 24-bit displacement */
    ra += 8;                    /* branches are relative to pc + 8 */
    ra += b;                    /* apply the displacement */
    ra -= 4;                    /* return a pointer into the current opcode,
                                   not the start of the next opcode */
    return ra;
}
#elif defined(__aarch64__)
# define GETRA() ((uintptr_t)__builtin_return_address(0))
# define GETPC_LDST() tcg_getpc_ldst(GETRA())
static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
{
    int32_t b;
    ra += 4;                    /* skip one instruction */
    b = *(int32_t *)ra;         /* load the branch insn */
    b = (b << 6) >> (6 - 2);    /* extract and scale the displacement */
    ra += b;                    /* apply the displacement */
    ra -= 4;                    /* return a pointer into the current opcode,
                                   not the start of the next opcode */
    return ra;
}
# else
# error "CONFIG_QEMU_LDST_OPTIMIZATION needs GETPC_LDST() implementation!"
# endif
bool is_tcg_gen_code(uintptr_t pc_ptr);
# define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
#else
# define GETPC_EXT() GETPC()
#endif
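
/*
 * Sketch (assumed shape, cf. the softmmu helper templates): a slow-path MMU
 * helper captures the fast path pc so that faults can be attributed to the
 * guest instruction that caused them:
 *
 *     uintptr_t retaddr = GETPC_EXT();
 *     ...
 *     tlb_fill(env, addr, is_write, mmu_idx, retaddr);  // on a TLB miss
 */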

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(hwaddr index);
bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
                 uint64_t *pvalue, unsigned size);
bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
                  uint64_t value, unsigned size);

void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

#include "exec/softmmu_defs.h"

/* Generate the code-fetch accessors (the _code suffix) for each size. */
#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code

#define DATA_SIZE 1
#include "exec/softmmu_header.h"

#define DATA_SIZE 2
#include "exec/softmmu_header.h"

#define DATA_SIZE 4
#include "exec/softmmu_header.h"

#define DATA_SIZE 8
#include "exec/softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
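
/*
 * Sketch (assumed generated names): each softmmu_header.h inclusion above
 * emits a code-fetch accessor with the _code suffix for the given size,
 * which translators use to read guest instructions through the code TLB,
 * e.g.
 *
 *     insn = cpu_ldl_code(env, pc);   // 4-byte instruction fetch
 */
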
#endif

#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif

typedef void (CPUDebugExcpHandler)(CPUArchState *env);

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);

/* vl.c */
extern int singlestep;

/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;

/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately.  */
static inline int can_do_io(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    if (!use_icount) {
        return 1;
    }
    /* If not executing code then assume we are ok.  */
    if (cpu->current_tb == NULL) {
        return 1;
    }
    return env->can_do_io != 0;
}
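
/*
 * Sketch (assumed shape, cf. the memory-access slow paths): callers that are
 * about to perform I/O in the middle of a TB under icount check this first
 * and force a retranslation ending at the offending instruction:
 *
 *     if (!can_do_io(env)) {
 *         cpu_io_recompile(env, retaddr);   // does not return
 *     }
 */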

#endif