1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#ifndef _EXEC_ALL_H_
21#define _EXEC_ALL_H_
22
23#include "qemu-common.h"
24
25
/* Allow the translator's intermediate disassembly output to be seen;
 * the slowdown from leaving this enabled is negligible. */
#define DEBUG_DISAS



/* Page tracking code uses RAM addresses in system mode, and virtual
 * addresses in userspace mode.  Define tb_page_addr_t to be an
 * appropriate type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif


/* is_jmp field values: why the translator stopped emitting code */
#define DISAS_NEXT 0    /* next instruction can be analyzed */
#define DISAS_JUMP 1    /* only pc was modified dynamically */
#define DISAS_UPDATE 2  /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* Maximum number of TCG ops one guest instruction may expand to */
#define MAX_OP_PER_INSTR 266

/* A 64-bit TCG op argument occupies two host words on a 32-bit host */
#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5   /* max input arguments per op */
#define MAX_OPC_PARAM_OARGS 1   /* max output arguments per op */
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* Worst-case parameter-word count for a single op: a few fixed words
 * (opcode/condition/etc.) plus the in/out argument words.
 * NOTE(review): the constant 4 presumably covers the fixed words --
 * confirm against the TCG opcode encoding. */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
#define OPC_BUF_SIZE 640
/* Stop translating before the op buffer could overflow mid-instruction */
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
66
67#include "qemu/log.h"
68
/* Translate one guest basic block into TCG ops for @tb. */
void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
/* Restore CPU state from the per-insn @data recorded at translation time. */
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);
/* Recover the guest CPU state corresponding to host address @searched_pc
 * inside generated code; returns whether the state could be restored. */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);

void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
/* Translate and register a new block for @pc/@cs_base/@flags. */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUState *cpu, Error **errp);
/* Longjmp out of generated code back to the main execution loop. */
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
84
#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);

/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);

/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 * @flush_global: also flush global entries if true
 *
 * Flush the entire TLB for the specified CPU.
 */
void tlb_flush(CPUState *cpu, int flush_global);

/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @...: list of MMU indexes to flush
 *
 * NOTE(review): the variadic MMU-index list presumably ends with a
 * negative sentinel -- confirm against the implementation.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);

/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @...: list of MMU indexes to flush (see note above the page variant)
 */
void tlb_flush_by_mmuidx(CPUState *cpu, ...);
/* Install a TLB entry mapping @vaddr -> @paddr for @mmu_idx. */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
/* As tlb_set_page, with explicit memory transaction attributes. */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr);
#else
/* User-only mode has no softmmu TLB: provide no-op stubs so callers
 * need not be conditionalized. */
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}

static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}

static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, ...)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
}
#endif
156
/* Alignment of blocks of generated host code */
#define CODE_GEN_ALIGN 16

/* Size of the hash table mapping physical page addresses to TBs */
#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)

/* Estimated host-code bytes generated per TB, used for sizing the TB
 * pool; softmmu emits extra code for memory accesses, user mode is
 * leaner. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/* Hosts on which the generated jump instruction can be patched in
 * place, allowing TBs to be chained with a direct branch. */
#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(__s390x__) || defined(__mips__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif
179
/* A translated block of guest code and its chaining metadata. */
struct TranslationBlock {
    target_ulong pc;        /* simulated PC corresponding to this block */
    target_ulong cs_base;   /* CS base for this block */
    uint64_t flags;         /* flags defining the context the code was
                               generated in */
    uint16_t size;          /* size of the guest code for this block */

    uint16_t icount;        /* guest instruction count */
    uint32_t cflags;        /* compile flags */
#define CF_COUNT_MASK 0x7fff
#define CF_LAST_IO 0x8000       /* last insn may be an I/O access */
#define CF_NOCACHE 0x10000      /* to be freed after execution */
#define CF_USE_ICOUNT 0x20000
#define CF_IGNORE_ICOUNT 0x40000 /* do not generate icount code */

    void *tc_ptr;           /* pointer to the translated host code */
    uint8_t *tc_search;     /* pointer to the PC-search data */
    /* next matching TB for the physical-address hash chain */
    struct TranslationBlock *phys_hash_next;
    /* original TB when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;

    /* first and second physical pages containing code; the low bit of
     * page_next[] pointers is used as an index tag */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* The following data are used to directly call another TB from the
     * code of this one (TB chaining). */
    uint16_t tb_next_offset[2]; /* offset of the original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2];  /* offset of the jump insn to patch */
#else
    uintptr_t tb_next[2];       /* address of the jump target code */
#endif

    /* List of TBs jumping to this one.  This is a circular list where
     * the two least-significant bits of each pointer tell which field
     * holds the next link: 0 = jmp_next[0], 1 = jmp_next[1],
     * 2 = jmp_first. */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
};
220
221#include "qemu/thread.h"
222
typedef struct TBContext TBContext;

/* Global state of the translation cache. */
struct TBContext {

    TranslationBlock *tbs;  /* pool of all translated blocks */
    /* lookup table: physical page address -> TB chain */
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;             /* number of TBs currently in use */

    /* protects the TB pool and related structures */
    QemuMutex tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    /* set when the TB cache was flushed while a lookup was in flight */
    int tb_invalidated_flag;
};

void tb_free(TranslationBlock *tb);
/* Discard the entire translation cache. */
void tb_flush(CPUState *cpu);
/* Remove one TB from the physical-page tracking structures. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
243
#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* Patch the branch destination (relative displacement).  The
     * interpreter reads this as data, so no icache flush is needed. */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* Patch the 32-bit relative branch displacement; x86 keeps the
     * icache coherent, so no explicit flush is needed. */
    stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4));
}
#elif defined(__s390x__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* Patch the branch displacement (halfword-scaled, relative to the
     * start of the insn two bytes before jmp_addr). */
    intptr_t disp = addr - (jmp_addr - 2);
    stl_be_p((void*)jmp_addr, disp / 2);
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* Rewrite the 24-bit offset field of the ARM branch insn
     * (pipeline makes PC read as insn address + 8, offset in words). */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache via the cacheflush syscall */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

/* Redirect the direct jump in slot @n of @tb to host address @addr. */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* Indirect jump: the generated code loads its target from tb_next[],
 * so updating the table is sufficient. */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif
321
322static inline void tb_add_jump(TranslationBlock *tb, int n,
323 TranslationBlock *tb_next)
324{
325
326 if (!tb->jmp_next[n]) {
327
328 tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
329
330
331 tb->jmp_next[n] = tb_next->jmp_first;
332 tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
333 }
334}
335
336
337
/* GETRA is the true return address of the current helper call, i.e. a
 * host address inside the generated code that invoked the helper. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETRA() tci_tb_ptr
#else
# define GETRA() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to host code belonging to
 * the NEXT translated guest insn.  Subtract a small adjustment so the
 * looked-up address lands inside the calling insn, identifying the
 * guest insn that actually made the call. */
#define GETPC_ADJ 2

#define GETPC() (GETRA() - GETPC_ADJ)
356
#if !defined(CONFIG_USER_ONLY)

/* Map an iotlb index back to the MemoryRegion it refers to. */
struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index);

/* Handle a softmmu TLB miss for @addr: fill the TLB entry or raise a
 * guest fault, using @retaddr to restore guest state. */
void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    /* In user-only mode guest virtual addresses are used directly as
     * code page addresses (see tb_page_addr_t above). */
    return addr;
}
#else
/* System mode does not track a host mmap table: no-op stubs. */
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* Return the page address (physical, per tb_page_addr_t) of the code
 * at guest virtual address @addr. */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* Flush the cached TB lookups for the page containing @addr. */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen);
/* Compute the iotlb value for a page mapping being installed. */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif
400
401
/* When nonzero, translate one guest instruction per TB
 * (the -singlestep command line option). */
extern int singlestep;

/* CPU currently executing generated code, if any. */
extern CPUState *tcg_current_cpu;
/* NOTE(review): presumably set to request that the execution loop
 * exit at the next check -- confirm against cpu-exec.c. */
extern bool exit_request;
407
408#endif
409