/*
 * Internal execution declarations for the TCG accelerator.
 *
 * NOTE(review): the original file header was lost to extraction garbling;
 * restore the project's standard license/copyright banner here.
 */
9#ifndef ACCEL_TCG_INTERNAL_H
10#define ACCEL_TCG_INTERNAL_H
11
12#include "exec/exec-all.h"
13
14
15
16
17
18
19
/*
 * Debug assertion that the memory (mmap) lock is held.
 * In user-only builds this checks have_mmap_lock(); in softmmu builds it
 * expands to nothing — presumably the structures are serialised by other
 * means there (TODO confirm against the locking scheme in the .c files).
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif
25
/* Per guest-page bookkeeping for translated code. */
typedef struct PageDesc {
    /*
     * Tagged list head of TranslationBlocks intersecting this page:
     * the low bit selects which tb->page_next[] entry continues the
     * chain, the remaining bits are the TB pointer (see PAGE_FOR_EACH_TB).
     */
    uintptr_t first_tb;
#ifdef CONFIG_USER_ONLY
    unsigned long flags;       /* page flags — presumably PAGE_* protection bits; confirm */
    void *target_data;         /* opaque per-page data owned by the target; confirm usage */
#endif
#ifdef CONFIG_SOFTMMU
    QemuSpin lock;             /* protects this descriptor (see page_lock/page_unlock) */
#endif
} PageDesc;
37
38
/* Each intermediate level of the multi-level page map holds V_L2_SIZE entries. */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/*
 * Geometry of the top level of the page map, chosen at runtime
 * (defined elsewhere — presumably set up in page_init(); confirm).
 */
extern int v_l1_size;
extern int v_l1_shift;
extern int v_l2_levels;

/*
 * Bounds on the runtime-sized first level: between 2^V_L1_MIN_BITS and
 * 2^V_L1_MAX_BITS entries; l1_map below is statically sized at the maximum.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

/* Top level of the page map; entries point at intermediate levels or PageDescs. */
extern void *l1_map[V_L1_MAX_SIZE];
58
/*
 * Look up the PageDesc for page @index; if @alloc is true, create the
 * descriptor (and any missing intermediate map levels) when absent.
 * NOTE(review): locking requirements are not visible here — presumably
 * the memory lock must be held; confirm in the defining .c file.
 */
PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc);
60
61static inline PageDesc *page_find(tb_page_addr_t index)
62{
63 return page_find_alloc(index, false);
64}
65
66
/*
 * Iterate over a tagged singly linked list of TranslationBlocks.
 * @head is a uintptr_t whose low bit (@n) selects which slot of
 * tb->field[] continues the chain; the remaining bits are the TB pointer.
 * Each step strips the tag into @n and the cleaned pointer into @tb.
 */
#define TB_FOR_EACH_TAGGED(head, tb, n, field) \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
         tb = (TranslationBlock *)((uintptr_t)tb & ~1))

/* Walk every TB linked onto @pagedesc via first_tb / page_next. */
#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

/* Walk every TB on @head_tb's jump list via jmp_list_head / jmp_list_next. */
#define TB_FOR_EACH_JMP(head_tb, tb, n) \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
77
78
/*
 * Page locking API.
 * In user-only builds the mmap lock covers all pages, so per-page
 * lock/unlock are no-ops and the assertion just checks have_mmap_lock().
 * In softmmu builds each PageDesc carries its own spinlock; the
 * held-lock assertion is compiled in only under CONFIG_DEBUG_TCG.
 */
#ifdef CONFIG_USER_ONLY
#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
static inline void page_lock(PageDesc *pd) { }
static inline void page_unlock(PageDesc *pd) { }
#else
#ifdef CONFIG_DEBUG_TCG
void do_assert_page_locked(const PageDesc *pd, const char *file, int line);
#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
#else
#define assert_page_locked(pd)
#endif
void page_lock(PageDesc *pd);
void page_unlock(PageDesc *pd);
#endif
/* Debug check that the current thread holds no page locks at all. */
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void) { }
#endif
98
/* Translate guest code at (pc, cs_base, flags) into a new TB for @cpu. */
TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
                              target_ulong cs_base, uint32_t flags,
                              int cflags);
/* Abort the current TB and retranslate around an I/O access; does not return. */
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
/* One-time initialisation of the page map machinery. */
void page_init(void);
/* One-time initialisation of the TB hash table. */
void tb_htable_init(void);
/* Reset @tb's outgoing jump @n — presumably back to its unlinked state; confirm. */
void tb_reset_jump(TranslationBlock *tb, int n);
/*
 * Link @tb into the page descriptors for its (up to two) physical pages.
 * NOTE(review): the returned TB may differ from @tb (e.g. an existing
 * duplicate found under lock) — confirm against the definition.
 */
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                               tb_page_addr_t phys_page2);
/* Invalidate TBs on @addr's page for unwinding from host pc @pc. */
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
/* Restore CPU state from @tb given the faulting host pc @host_pc. */
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc);
111
112
/*
 * Return the guest PC to use when logging @tb's execution on @cpu.
 * With TARGET_TB_PCREL the TB presumably carries no absolute PC, so the
 * value is fetched through the CPU class get_pc() hook; otherwise it is
 * read directly from the TB via tb_pc().
 */
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
{
#if TARGET_TB_PCREL
    return cpu->cc->get_pc(cpu);
#else
    return tb_pc(tb);
#endif
}
121
122#endif
123