1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#ifndef EXEC_ALL_H
21#define EXEC_ALL_H
22
23#include "qemu-common.h"
24#include "exec/tb-context.h"
25
26
27#define DEBUG_DISAS
28
29
30
31
32#if defined(CONFIG_USER_ONLY)
33typedef abi_ulong tb_page_addr_t;
34#else
35typedef ram_addr_t tb_page_addr_t;
36#endif
37
38
39#define DISAS_NEXT 0
40#define DISAS_JUMP 1
41#define DISAS_UPDATE 2
42#define DISAS_TB_JUMP 3
43
44#include "qemu/log.h"
45
46void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
47void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
48 target_ulong *data);
49
50void cpu_gen_init(void);
51bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
52
53void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
54void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
55TranslationBlock *tb_gen_code(CPUState *cpu,
56 target_ulong pc, target_ulong cs_base,
57 uint32_t flags,
58 int cflags);
59
60void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
61void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
62void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
63
64#if !defined(CONFIG_USER_ONLY)
65void cpu_reloading_memory_map(void);
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
85
86
87
88
89
90
91
92
93
94void tlb_flush_page(CPUState *cpu, target_ulong addr);
95
96
97
98
99
100
101
102
103
104
105
106void tlb_flush(CPUState *cpu, int flush_global);
107
108
109
110
111
112
113
114
115
116void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
117
118
119
120
121
122
123
124
125void tlb_flush_by_mmuidx(CPUState *cpu, ...);
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
149 hwaddr paddr, MemTxAttrs attrs,
150 int prot, int mmu_idx, target_ulong size);
151
152
153
154
155
156
157void tlb_set_page(CPUState *cpu, target_ulong vaddr,
158 hwaddr paddr, int prot,
159 int mmu_idx, target_ulong size);
160void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
161void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
162 uintptr_t retaddr);
163#else
/* CONFIG_USER_ONLY stub: user-mode emulation has no softmmu TLB, so a
 * page flush is a no-op. */
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
167
/* CONFIG_USER_ONLY stub: no softmmu TLB exists, so there is nothing to
 * flush; flush_global is ignored. */
static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}
171
/* CONFIG_USER_ONLY stub: per-MMU-index page flush is a no-op without a
 * softmmu TLB.  The trailing varargs (the MMU index list in the softmmu
 * variant) are ignored. */
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, ...)
{
}
176
/* CONFIG_USER_ONLY stub: per-MMU-index flush is a no-op without a
 * softmmu TLB. */
static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
}
180#endif
181
182#define CODE_GEN_ALIGN 16
183
184
185
186
187
188#if defined(CONFIG_SOFTMMU)
189#define CODE_GEN_AVG_BLOCK_SIZE 400
190#else
191#define CODE_GEN_AVG_BLOCK_SIZE 150
192#endif
193
194#if defined(__arm__) || defined(_ARCH_PPC) \
195 || defined(__x86_64__) || defined(__i386__) \
196 || defined(__sparc__) || defined(__aarch64__) \
197 || defined(__s390x__) || defined(__mips__) \
198 || defined(CONFIG_TCG_INTERPRETER)
199
200#define USE_DIRECT_JUMP
201#endif
202
/*
 * TranslationBlock: one block of translated guest code, plus the
 * metadata needed to look it up, invalidate it, and chain it to other
 * blocks (see tb_set_jmp_target / tb_add_jump below).
 */
struct TranslationBlock {
    target_ulong pc;       /* guest PC of the first instruction of the block */
    target_ulong cs_base;  /* CS base for this block */
    uint32_t flags;        /* flags defining in which context the code was generated */
    uint16_t size;         /* size of the guest code for this block */

    uint16_t icount;       /* NOTE(review): presumably the number of guest
                            * insns in the block -- confirm */
    uint32_t cflags;       /* compile flags; combination of the CF_* bits below */
#define CF_COUNT_MASK 0x7fff
#define CF_LAST_IO 0x8000
#define CF_NOCACHE 0x10000
#define CF_USE_ICOUNT 0x20000
#define CF_IGNORE_ICOUNT 0x40000

    /* NOTE(review): presumably non-zero once the TB has been
     * invalidated -- confirm against translate-all.c */
    uint16_t invalid;

    void *tc_ptr;          /* pointer to the translated host code */
    uint8_t *tc_search;    /* NOTE(review): presumably data used to map host
                            * PCs back to guest state (cpu_restore_state) --
                            * confirm */

    struct TranslationBlock *orig_tb; /* NOTE(review): presumably the original
                                       * TB when this one is a temporary
                                       * (CF_NOCACHE) copy -- confirm */

    /* Per-physical-page bookkeeping; two entries because a block's guest
     * code may span two pages (second entry unused otherwise -- confirm). */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* Outgoing-jump (TB chaining) support.  Each TB has up to two
     * chained exits.  jmp_reset_offset[] gives the offset in the
     * generated code where each jump can be reset to "unchained";
     * a slot holding TB_JMP_RESET_OFFSET_INVALID is unused. */
    uint16_t jmp_reset_offset[2];
#define TB_JMP_RESET_OFFSET_INVALID 0xffff
#ifdef USE_DIRECT_JUMP
    /* Offset of the jump instruction to patch in place
     * (see tb_set_jmp_target). */
    uint16_t jmp_insn_offset[2];
#else
    /* Target address loaded indirectly by the generated code. */
    uintptr_t jmp_target_addr[2];
#endif

    /* Incoming-jump bookkeeping: every TB that chains to this one is
     * recorded so the links can be undone when this TB is invalidated.
     * The jump slot index (0 or 1) is packed into the low bits of each
     * pointer-sized entry (see tb_add_jump). */
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_list_first;
};
257
258void tb_free(TranslationBlock *tb);
259void tb_flush(CPUState *cpu);
260void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
261
262#if defined(USE_DIRECT_JUMP)
263
264#if defined(CONFIG_TCG_INTERPRETER)
/* TCG-interpreter variant: patch the 32-bit jump operand at jmp_addr.
 * The stored displacement is relative to the end of the 4-byte operand
 * (jmp_addr + 4); atomic_set makes the patch safe against concurrent
 * execution of the block. */
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
}
271#elif defined(_ARCH_PPC)
272void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
273#define tb_set_jmp_target1 ppc_tb_set_jmp_target
274#elif defined(__i386__) || defined(__x86_64__)
/* x86 / x86-64 variant: patch the rel32 operand of the generated jump.
 * The displacement is relative to the address of the next instruction
 * (jmp_addr + 4, the byte after the 4-byte operand); atomic_set makes
 * the patch safe against concurrent execution. */
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
}
281#elif defined(__s390x__)
/* s390x variant: patch the 32-bit displacement field of the generated
 * branch.  The displacement counts halfwords and is measured from the
 * start of the instruction, which begins 2 bytes before the operand
 * (jmp_addr - 2). */
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    intptr_t disp = addr - (jmp_addr - 2);
    atomic_set((int32_t *)jmp_addr, disp / 2);
}
289#elif defined(__aarch64__)
290void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
291#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
292#elif defined(__arm__)
293void arm_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
294#define tb_set_jmp_target1 arm_tb_set_jmp_target
295#elif defined(__sparc__) || defined(__mips__)
296void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
297#else
298#error tb_set_jmp_target1 is missing
299#endif
300
301static inline void tb_set_jmp_target(TranslationBlock *tb,
302 int n, uintptr_t addr)
303{
304 uint16_t offset = tb->jmp_insn_offset[n];
305 tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
306}
307
308#else
309
310
/* Indirect-jump variant: no code patching is possible, so just record
 * the target address; the generated code loads jmp_target_addr[n] and
 * branches through it. */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->jmp_target_addr[n] = addr;
}
316
317#endif
318
319
/* Chain exit slot @n of @tb directly to @tb_next: patch the generated
 * code so the exit jumps straight to tb_next's host code, and record the
 * link on tb_next's incoming-jump list so it can be undone when tb_next
 * is invalidated. */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    if (tb->jmp_list_next[n]) {
        /* Slot already linked to some TB: leave the existing chain in
         * place rather than re-patching. */
        return;
    }
    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc_ptr, tb->pc, n,
                           tb_next->tc_ptr, tb_next->pc);

    /* Patch the generated code so exit @n jumps to tb_next's host code. */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

    /* Push (tb, n) onto tb_next's incoming-jump list; the slot index n
     * is packed into the low bits of the pointer-sized entry. */
    tb->jmp_list_next[n] = tb_next->jmp_list_first;
    tb_next->jmp_list_first = (uintptr_t)tb | n;
}
342
343
344#if defined(CONFIG_TCG_INTERPRETER)
345extern uintptr_t tci_tb_ptr;
346# define GETPC() tci_tb_ptr
347#else
348# define GETPC() \
349 ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
350#endif
351
352
353
354
355
356
357
358
359#define GETPC_ADJ 2
360
361#if !defined(CONFIG_USER_ONLY)
362
363struct MemoryRegion *iotlb_to_region(CPUState *cpu,
364 hwaddr index, MemTxAttrs attrs);
365
366void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType access_type,
367 int mmu_idx, uintptr_t retaddr);
368
369#endif
370
371#if defined(CONFIG_USER_ONLY)
372void mmap_lock(void);
373void mmap_unlock(void);
374bool have_mmap_lock(void);
375
/* CONFIG_USER_ONLY: guest virtual addresses map 1:1 onto the
 * tb_page_addr_t space (no softmmu translation), so the code page
 * address is the address itself.  env1 is unused here. */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
380#else
/* Softmmu builds do not use the user-mode guest mmap lock; both
 * operations are no-ops here. */
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}
383
384
385tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
386
387void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
388void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
389
390
391void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
392
393MemoryRegionSection *
394address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
395 hwaddr *xlat, hwaddr *plen, int *prot,
396 MemTxAttrs *attr);
397hwaddr memory_region_section_get_iotlb(CPUState *cpu,
398 MemoryRegionSection *section,
399 target_ulong vaddr,
400 hwaddr paddr, hwaddr xlat,
401 int prot,
402 target_ulong *address);
403bool memory_region_is_unassigned(MemoryRegion *mr);
404
405#endif
406
407
408extern int singlestep;
409
410
411extern CPUState *tcg_current_cpu;
412extern bool exit_request;
413
414#endif
415