1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#ifndef _EXEC_ALL_H_
21#define _EXEC_ALL_H_
22
23#include "qemu-common.h"
24
25
26#define DEBUG_DISAS
27
28
29
30
31#if defined(CONFIG_USER_ONLY)
32typedef abi_ulong tb_page_addr_t;
33#else
34typedef ram_addr_t tb_page_addr_t;
35#endif
36
37
38#define DISAS_NEXT 0
39#define DISAS_JUMP 1
40#define DISAS_UPDATE 2
41#define DISAS_TB_JUMP 3
42
43struct TranslationBlock;
44typedef struct TranslationBlock TranslationBlock;
45
46
47#define MAX_OP_PER_INSTR 266
48
49#if HOST_LONG_BITS == 32
50#define MAX_OPC_PARAM_PER_ARG 2
51#else
52#define MAX_OPC_PARAM_PER_ARG 1
53#endif
54#define MAX_OPC_PARAM_IARGS 5
55#define MAX_OPC_PARAM_OARGS 1
56#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
57
58
59
60
61#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
62#define OPC_BUF_SIZE 640
63#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
64
65#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
66
67#include "qemu/log.h"
68
69void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
70void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
71 target_ulong *data);
72
73void cpu_gen_init(void);
74bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
75
76void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
77void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
78TranslationBlock *tb_gen_code(CPUState *cpu,
79 target_ulong pc, target_ulong cs_base, int flags,
80 int cflags);
81void cpu_exec_init(CPUState *cpu, Error **errp);
82void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
83void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
84
85#if !defined(CONFIG_USER_ONLY)
86void cpu_reloading_memory_map(void);
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
106
107
108
109
110
111
112
113
114AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
115
116
117
118
119
120
121
122
123
124void tlb_flush_page(CPUState *cpu, target_ulong addr);
125
126
127
128
129
130
131
132
133
134
135
136void tlb_flush(CPUState *cpu, int flush_global);
137
138
139
140
141
142
143
144
145
146void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
147
148
149
150
151
152
153
154
155void tlb_flush_by_mmuidx(CPUState *cpu, ...);
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
179 hwaddr paddr, MemTxAttrs attrs,
180 int prot, int mmu_idx, target_ulong size);
181
182
183
184
185
186
187void tlb_set_page(CPUState *cpu, target_ulong vaddr,
188 hwaddr paddr, int prot,
189 int mmu_idx, target_ulong size);
190void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
191void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
192 uintptr_t retaddr);
193#else
/* User-mode emulation has no softmmu TLB, so flushing a page is a no-op. */
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
197
/* User-mode emulation has no softmmu TLB, so a full TLB flush is a no-op. */
static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}
201
/* No-op stub for user-mode emulation (no TLB, so no per-MMU-index flush).
 * The trailing varargs mirror the softmmu prototype above -- presumably a
 * list of MMU indexes; see the softmmu implementation for the convention. */
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, ...)
{
}
206
/* No-op stub for user-mode emulation; see tlb_flush_by_mmuidx above for
 * the softmmu variant this mirrors. */
static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
}
210#endif
211
212#define CODE_GEN_ALIGN 16
213
214#define CODE_GEN_PHYS_HASH_BITS 15
215#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)
216
217
218
219
220
221#if defined(CONFIG_SOFTMMU)
222#define CODE_GEN_AVG_BLOCK_SIZE 400
223#else
224#define CODE_GEN_AVG_BLOCK_SIZE 150
225#endif
226
227#if defined(__arm__) || defined(_ARCH_PPC) \
228 || defined(__x86_64__) || defined(__i386__) \
229 || defined(__sparc__) || defined(__aarch64__) \
230 || defined(__s390x__) || defined(__mips__) \
231 || defined(CONFIG_TCG_INTERPRETER)
232#define USE_DIRECT_JUMP
233#endif
234
/*
 * A TranslationBlock (TB) describes one translated block of guest code:
 * the guest state it was generated for, where the generated host code
 * lives, and the hash/page/jump links connecting TBs together.
 */
struct TranslationBlock {
    target_ulong pc;        /* guest PC of the first instruction */
    target_ulong cs_base;   /* guest CS base; with pc and flags it keys the TB */
    uint64_t flags;         /* CPU state flags the code was generated for */
    uint16_t size;          /* size of the guest code covered by this TB */

    uint16_t icount;        /* guest instruction count -- NOTE(review): presumably
                               for icount mode (see CF_USE_ICOUNT); confirm */
    uint32_t cflags;        /* compile flags; CF_* bits below */
#define CF_COUNT_MASK 0x7fff    /* low bits: instruction-count budget */
#define CF_LAST_IO 0x8000       /* last insn may be an I/O access (see CF_LAST_IO users) */
#define CF_NOCACHE 0x10000      /* one-shot TB, not entered into the caches -- TODO confirm */
#define CF_USE_ICOUNT 0x20000
#define CF_IGNORE_ICOUNT 0x40000

    void *tc_ptr;           /* pointer to the generated host (translated) code */
    uint8_t *tc_search;     /* search data -- presumably the encoded guest-pc map
                               consumed by restore_state_to_opc(); confirm */

    /* next TB in the same physical-hash bucket (see TBContext.tb_phys_hash) */
    struct TranslationBlock *phys_hash_next;

    /* original TB this one was derived from -- NOTE(review): likely set for
       CF_NOCACHE copies made by tb_gen_code; confirm against callers */
    struct TranslationBlock *orig_tb;

    /* TBs are also listed per guest physical page; a block may straddle
       two pages, hence the pair of links and page addresses */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* per-jump-slot offsets into the generated code -- exact semantics
       depend on the TCG backend; TODO confirm against translate-all.c */
    uint16_t tb_next_offset[2];
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2];  /* offsets of the patchable jump sites
                                   (consumed by tb_set_jmp_target) */
#else
    uintptr_t tb_next[2];       /* absolute chain targets loaded by the
                                   generated code (see tb_set_jmp_target) */
#endif

    /* circular list of TBs jumping into this one: jmp_first heads the
       incoming list, jmp_next[n] continues the chain for outgoing slot n;
       the low bits of each link encode the jump index (see tb_add_jump) */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
};
275
276#include "qemu/thread.h"
277
278typedef struct TBContext TBContext;
279
/* Global state shared by all translation blocks. */
struct TBContext {

    TranslationBlock *tbs;          /* pool of translation blocks */
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE]; /* lookup by guest
                                       physical address */
    int nb_tbs;                     /* number of TBs currently allocated from tbs */

    QemuMutex tb_lock;              /* protects the structures above -- TODO
                                       confirm exact locking scope */

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    /* flag noting that some TB was invalidated -- presumably checked by the
       execution loop to avoid chaining to stale blocks; confirm */
    int tb_invalidated_flag;
};
294
295void tb_free(TranslationBlock *tb);
296void tb_flush(CPUState *cpu);
297void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
298
299#if defined(USE_DIRECT_JUMP)
300
301#if defined(CONFIG_TCG_INTERPRETER)
/* TCG-interpreter variant: patch the 32-bit jump operand at jmp_addr so the
 * interpreted code branches to addr.  The stored value is relative to the
 * end of the 4-byte operand.  No icache maintenance is done here -- the
 * interpreter reads the patched word as ordinary data. */
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{

    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);

}
308#elif defined(_ARCH_PPC)
309void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
310#define tb_set_jmp_target1 ppc_tb_set_jmp_target
311#elif defined(__i386__) || defined(__x86_64__)
/* i386/x86_64: patch the rel32 operand of the jump insn at jmp_addr.  The
 * displacement is relative to the end of the 4-byte operand; x86 keeps
 * I/D caches coherent, so no explicit flush is performed. */
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{

    stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4));

}
318#elif defined(__s390x__)
/* s390x: the 32-bit operand at jmp_addr is the displacement of a
 * branch-relative insn whose opcode begins 2 bytes earlier (hence
 * jmp_addr - 2), and is counted in halfwords (hence the division by 2). */
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{

    intptr_t disp = addr - (jmp_addr - 2);
    stl_be_p((void*)jmp_addr, disp / 2);

}
326#elif defined(__aarch64__)
327void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
328#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
329#elif defined(__arm__)
/* ARM: rewrite the signed 24-bit word offset of the branch insn at
 * jmp_addr, then flush the icache over the patched word. */
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    /* pin the cache-flush syscall arguments to registers a1-a3 */
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* Keep the top 8 bits (condition + opcode) and replace the 24-bit
       offset field.  The +8 accounts for the ARM PC read-ahead; >>2
       converts the byte displacement to words. */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* pre-GCC-4.1 fallback: ARM Linux cacheflush syscall (swi 0x9f0002)
       over the patched instruction */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
353#elif defined(__sparc__) || defined(__mips__)
354void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
355#else
356#error tb_set_jmp_target1 is missing
357#endif
358
359static inline void tb_set_jmp_target(TranslationBlock *tb,
360 int n, uintptr_t addr)
361{
362 uint16_t offset = tb->tb_jmp_offset[n];
363 tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
364}
365
366#else
367
368
/* Fallback when direct jump patching is unavailable: the generated code
 * loads its chaining target from tb_next[], so redirecting jump slot n is
 * just a data store. */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}
374
375#endif
376
/* Chain outgoing jump slot @n of @tb directly to @tb_next's host code, and
 * record the link so it can be undone when @tb_next is invalidated. */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* only link once: a non-NULL jmp_next[n] means slot n is already
       chained (NULL doubles as the "unlinked" marker) */
    if (!tb->jmp_next[n]) {
        qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                               "Linking TBs %p [" TARGET_FMT_lx
                               "] index %d -> %p [" TARGET_FMT_lx "]\n",
                               tb->tc_ptr, tb->pc, n,
                               tb_next->tc_ptr, tb_next->pc);

        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add @tb to the list of TBs jumping into @tb_next; the low bits
           of each link encode which jump slot of the predecessor is used */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}
395
396
397
398#if defined(CONFIG_TCG_INTERPRETER)
399extern uintptr_t tci_tb_ptr;
400# define GETRA() tci_tb_ptr
401#else
402# define GETRA() \
403 ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
404#endif
405
406
407
408
409
410
411
412
413#define GETPC_ADJ 2
414
415#define GETPC() (GETRA() - GETPC_ADJ)
416
417#if !defined(CONFIG_USER_ONLY)
418
419struct MemoryRegion *iotlb_to_region(CPUState *cpu,
420 hwaddr index, MemTxAttrs attrs);
421
422void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
423 uintptr_t retaddr);
424
425#endif
426
427#if defined(CONFIG_USER_ONLY)
428void mmap_lock(void);
429void mmap_unlock(void);
430
/* User-mode emulation has no separate physical address space, so the code
 * page address is the guest address itself (env1 is unused here). */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
435#else
static inline void mmap_lock(void) {}   /* softmmu build: mmap locking not needed */
static inline void mmap_unlock(void) {} /* softmmu build: mmap locking not needed */
438
439
440tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
441
442void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
443void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
444
445
446void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
447
448MemoryRegionSection *
449address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
450 hwaddr *xlat, hwaddr *plen);
451hwaddr memory_region_section_get_iotlb(CPUState *cpu,
452 MemoryRegionSection *section,
453 target_ulong vaddr,
454 hwaddr paddr, hwaddr xlat,
455 int prot,
456 target_ulong *address);
457bool memory_region_is_unassigned(MemoryRegion *mr);
458
459#endif
460
461
462extern int singlestep;
463
464
465extern CPUState *tcg_current_cpu;
466extern bool exit_request;
467
468#endif
469