1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#ifndef HVF_X86_H
20#define HVF_X86_H
21
/*
 * Overlay describing one x86 general-purpose register and its aliased
 * sub-registers (little-endian layout): rrx is the full 64-bit value,
 * erx the low 32 bits, rx the low 16 bits, and lx/hx the low and high
 * bytes of the low word (AL/AH style).  The hi*_unused fields only pad
 * the narrower views out to 64 bits.
 */
typedef struct x86_register {
    union {
        struct {
            uint64_t rrx;               /* full 64-bit register (RAX, ...) */
        };
        struct {
            uint32_t erx;               /* low 32 bits (EAX, ...) */
            uint32_t hi32_unused1;
        };
        struct {
            uint16_t rx;                /* low 16 bits (AX, ...) */
            uint16_t hi16_unused1;
            uint32_t hi32_unused2;
        };
        struct {
            uint8_t lx;                 /* low byte (AL, ...) */
            uint8_t hx;                 /* high byte of low word (AH, ...) */
            uint16_t hi16_unused2;
            uint32_t hi32_unused3;
        };
    };
} __attribute__ ((__packed__)) x86_register;
44
45
/*
 * 16-bit Task State Segment (Intel SDM Vol. 3A, "Task Management").
 * Every field of the 16-bit TSS is 16 bits wide and the structure is
 * exactly 44 bytes.  sp1 and sp2 were previously declared uint32_t,
 * which misaligned every field from ss1 onward when this struct is
 * overlaid on a guest TSS image during a task switch.
 */
typedef struct x86_tss_segment16 {
    uint16_t link;      /* selector of the previous task's TSS */
    uint16_t sp0;       /* stack pointer / stack segment for CPL 0 */
    uint16_t ss0;
    uint16_t sp1;       /* ... for CPL 1 */
    uint16_t ss1;
    uint16_t sp2;       /* ... for CPL 2 */
    uint16_t ss2;
    uint16_t ip;        /* saved execution state */
    uint16_t flags;
    uint16_t ax;
    uint16_t cx;
    uint16_t dx;
    uint16_t bx;
    uint16_t sp;
    uint16_t bp;
    uint16_t si;
    uint16_t di;
    uint16_t es;        /* saved segment selectors */
    uint16_t cs;
    uint16_t ss;
    uint16_t ds;
    uint16_t ldtr;      /* LDT selector of the task */
} __attribute__((packed)) x86_tss_segment16;
70
71
/*
 * 32-bit Task State Segment (Intel SDM Vol. 3A, "Task Management").
 * 104 bytes; matches the architectural layout, so it can be overlaid
 * directly on a guest TSS image.  The segment-selector fields occupy a
 * full 32 bits each in memory with the upper 16 bits reserved.
 */
typedef struct x86_tss_segment32 {
    uint32_t prev_tss;      /* selector of the previous task's TSS */
    uint32_t esp0;          /* stack pointer / stack segment for CPL 0 */
    uint32_t ss0;
    uint32_t esp1;          /* ... for CPL 1 */
    uint32_t ss1;
    uint32_t esp2;          /* ... for CPL 2 */
    uint32_t ss2;
    uint32_t cr3;           /* page-directory base of the task */
    uint32_t eip;           /* saved execution state */
    uint32_t eflags;
    uint32_t eax;
    uint32_t ecx;
    uint32_t edx;
    uint32_t ebx;
    uint32_t esp;
    uint32_t ebp;
    uint32_t esi;
    uint32_t edi;
    uint32_t es;            /* saved segment selectors */
    uint32_t cs;
    uint32_t ss;
    uint32_t ds;
    uint32_t fs;
    uint32_t gs;
    uint32_t ldt;           /* LDT selector of the task */
    uint16_t trap;          /* bit 0: raise #DB on task switch */
    uint16_t iomap_base;    /* offset of the I/O permission bitmap */
} __attribute__ ((__packed__)) x86_tss_segment32;
101
102
/*
 * 64-bit Task State Segment (Intel SDM Vol. 3A, "Task Management").
 * 104 bytes.  In long mode the TSS holds no execution state, only the
 * privilege-level stack pointers, the interrupt stack table, and the
 * I/O permission bitmap offset.
 */
typedef struct x86_tss_segment64 {
    uint32_t unused;        /* reserved */
    uint64_t rsp0;          /* stack pointer loaded on transition to CPL 0 */
    uint64_t rsp1;          /* ... to CPL 1 */
    uint64_t rsp2;          /* ... to CPL 2 */
    uint64_t unused1;       /* reserved */
    uint64_t ist1;          /* interrupt stack table entries 1..7 */
    uint64_t ist2;
    uint64_t ist3;
    uint64_t ist4;
    uint64_t ist5;
    uint64_t ist6;
    uint64_t ist7;
    uint64_t unused2;       /* reserved */
    uint16_t unused3;       /* reserved */
    uint16_t iomap_base;    /* offset of the I/O permission bitmap */
} __attribute__ ((__packed__)) x86_tss_segment64;
120
121
/*
 * Segment descriptor as stored in the GDT/LDT (Intel SDM Vol. 3A,
 * "Segment Descriptors").  The base and limit are scattered across the
 * descriptor; use x86_segment_base()/x86_segment_limit() and their
 * setters to (de)compose them.
 */
typedef struct x86_segment_descriptor {
    uint64_t    limit0:16;  /* limit bits 15:0 */
    uint64_t    base0:16;   /* base bits 15:0 */
    uint64_t    base1:8;    /* base bits 23:16 */
    uint64_t    type:4;     /* segment type (code/data/system) */
    uint64_t    s:1;        /* 0 = system descriptor, 1 = code/data */
    uint64_t    dpl:2;      /* descriptor privilege level */
    uint64_t    p:1;        /* present */
    uint64_t    limit1:4;   /* limit bits 19:16 */
    uint64_t    avl:1;      /* available for software use */
    uint64_t    l:1;        /* 1 = 64-bit code segment */
    uint64_t    db:1;       /* default operation size (0 = 16, 1 = 32 bit) */
    uint64_t    g:1;        /* granularity: limit in bytes (0) or 4K pages (1) */
    uint64_t    base2:8;    /* base bits 31:24 */
} __attribute__ ((__packed__)) x86_segment_descriptor;
137
138static inline uint32_t x86_segment_base(x86_segment_descriptor *desc)
139{
140 return (uint32_t)((desc->base2 << 24) | (desc->base1 << 16) | desc->base0);
141}
142
143static inline void x86_set_segment_base(x86_segment_descriptor *desc,
144 uint32_t base)
145{
146 desc->base2 = base >> 24;
147 desc->base1 = (base >> 16) & 0xff;
148 desc->base0 = base & 0xffff;
149}
150
151static inline uint32_t x86_segment_limit(x86_segment_descriptor *desc)
152{
153 uint32_t limit = (uint32_t)((desc->limit1 << 16) | desc->limit0);
154 if (desc->g) {
155 return (limit << 12) | 0xfff;
156 }
157 return limit;
158}
159
160static inline void x86_set_segment_limit(x86_segment_descriptor *desc,
161 uint32_t limit)
162{
163 desc->limit0 = limit & 0xffff;
164 desc->limit1 = limit >> 16;
165}
166
/*
 * Call-gate descriptor (Intel SDM Vol. 3A, "Call Gates").
 * Previously param_count was 4 bits, dpl 1 bit, and the S bit was
 * missing (61 declared bits total), which shifted p and offset1 away
 * from their architectural positions.  The widths below match the
 * hardware layout exactly: 16+16+5+3+4+1+2+1+16 = 64 bits.
 */
typedef struct x86_call_gate {
    uint64_t offset0:16;     /* target offset bits 15:0 */
    uint64_t selector:16;    /* target code-segment selector */
    uint64_t param_count:5;  /* stack words copied on a stack switch */
    uint64_t reserved:3;
    uint64_t type:4;         /* gate type (e.g. 0xc = 32-bit call gate) */
    uint64_t s:1;            /* 0 = system descriptor (always 0 for gates) */
    uint64_t dpl:2;          /* descriptor privilege level */
    uint64_t p:1;            /* present */
    uint64_t offset1:16;     /* target offset bits 31:16 */
} __attribute__ ((__packed__)) x86_call_gate;
177
178static inline uint32_t x86_call_gate_offset(x86_call_gate *gate)
179{
180 return (uint32_t)((gate->offset1 << 16) | gate->offset0);
181}
182
/* Values of the TI (table indicator) bit in a segment selector. */
#define GDT_SEL     0
#define LDT_SEL     1

/*
 * x86 segment selector (Intel SDM Vol. 3A, "Segment Selectors").
 * NOTE(review): "x68" looks like a historical typo for "x86"; the name
 * is kept as-is since it is part of this header's public interface.
 */
typedef struct x68_segment_selector {
    union {
        uint16_t sel;           /* raw 16-bit selector value */
        struct {
            uint16_t rpl:2;     /* requested privilege level */
            uint16_t ti:1;      /* table indicator: GDT_SEL or LDT_SEL */
            uint16_t index:13;  /* descriptor index within the table */
        };
    };
} __attribute__ ((__packed__)) x68_segment_selector;
196
197
/*
 * Register accessors.  x86_reg() reinterprets one entry of cpu->regs as
 * an x86_register overlay; the macros below are writable lvalues for
 * the aliased views of each GPR (64/32/16/8-bit), indexed by the R_E*
 * register constants.
 */
#define x86_reg(cpu, reg) ((x86_register *) &cpu->regs[reg])

/* Full 64-bit registers. */
#define RRX(cpu, reg)   (x86_reg(cpu, reg)->rrx)
#define RAX(cpu)        RRX(cpu, R_EAX)
#define RCX(cpu)        RRX(cpu, R_ECX)
#define RDX(cpu)        RRX(cpu, R_EDX)
#define RBX(cpu)        RRX(cpu, R_EBX)
#define RSP(cpu)        RRX(cpu, R_ESP)
#define RBP(cpu)        RRX(cpu, R_EBP)
#define RSI(cpu)        RRX(cpu, R_ESI)
#define RDI(cpu)        RRX(cpu, R_EDI)
#define R8(cpu)         RRX(cpu, R_R8)
#define R9(cpu)         RRX(cpu, R_R9)
#define R10(cpu)        RRX(cpu, R_R10)
#define R11(cpu)        RRX(cpu, R_R11)
#define R12(cpu)        RRX(cpu, R_R12)
#define R13(cpu)        RRX(cpu, R_R13)
#define R14(cpu)        RRX(cpu, R_R14)
#define R15(cpu)        RRX(cpu, R_R15)

/* Low 32 bits of each register. */
#define ERX(cpu, reg)   (x86_reg(cpu, reg)->erx)
#define EAX(cpu)        ERX(cpu, R_EAX)
#define ECX(cpu)        ERX(cpu, R_ECX)
#define EDX(cpu)        ERX(cpu, R_EDX)
#define EBX(cpu)        ERX(cpu, R_EBX)
#define ESP(cpu)        ERX(cpu, R_ESP)
#define EBP(cpu)        ERX(cpu, R_EBP)
#define ESI(cpu)        ERX(cpu, R_ESI)
#define EDI(cpu)        ERX(cpu, R_EDI)

/* Low 16 bits of each register. */
#define RX(cpu, reg)   (x86_reg(cpu, reg)->rx)
#define AX(cpu)        RX(cpu, R_EAX)
#define CX(cpu)        RX(cpu, R_ECX)
#define DX(cpu)        RX(cpu, R_EDX)
#define BP(cpu)        RX(cpu, R_EBP)
#define SP(cpu)        RX(cpu, R_ESP)
#define BX(cpu)        RX(cpu, R_EBX)
#define SI(cpu)        RX(cpu, R_ESI)
#define DI(cpu)        RX(cpu, R_EDI)

/* Low byte (AL/CL/DL/BL). */
#define RL(cpu, reg)   (x86_reg(cpu, reg)->lx)
#define AL(cpu)        RL(cpu, R_EAX)
#define CL(cpu)        RL(cpu, R_ECX)
#define DL(cpu)        RL(cpu, R_EDX)
#define BL(cpu)        RL(cpu, R_EBX)

/* High byte of the low word (AH/CH/DH/BH). */
#define RH(cpu, reg)   (x86_reg(cpu, reg)->hx)
#define AH(cpu)        RH(cpu, R_EAX)
#define CH(cpu)        RH(cpu, R_ECX)
#define DH(cpu)        RH(cpu, R_EDX)
#define BH(cpu)        RH(cpu, R_EBX)
249
250
/*
 * Read/write the descriptor referenced by 'sel' from/to the guest's
 * GDT or LDT.  Return true on success.
 */
bool x86_read_segment_descriptor(struct CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
                                 x68_segment_selector sel);
bool x86_write_segment_descriptor(struct CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
                                  x68_segment_selector sel);

/* Read IDT entry 'gate' from the guest into *idt_desc; true on success. */
bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
                        int gate);


/* Current CPU mode predicates. */
bool x86_is_protected(struct CPUState *cpu);
bool x86_is_real(struct CPUState *cpu);
bool x86_is_v8086(struct CPUState *cpu);
bool x86_is_long_mode(struct CPUState *cpu);
bool x86_is_long64_mode(struct CPUState *cpu);
bool x86_is_paging_mode(struct CPUState *cpu);
bool x86_is_pae_enabled(struct CPUState *cpu);

/* Segmented-to-linear address translation helpers. */
enum X86Seg;
target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, enum X86Seg seg);
target_ulong linear_addr_size(struct CPUState *cpu, target_ulong addr, int size,
                              enum X86Seg seg);
target_ulong linear_rip(struct CPUState *cpu, target_ulong rip);
275
/*
 * Read the 64-bit time-stamp counter with RDTSCP.  The instruction
 * also loads IA32_TSC_AUX into ECX, hence the rcx clobber; rdx is
 * consumed as a named output instead of a clobber.
 */
static inline uint64_t rdtscp(void)
{
    uint32_t lo, hi;

    __asm__ __volatile__("rdtscp"
                         : "=a"(lo), "=d"(hi)
                         :
                         : "%rcx");

    return ((uint64_t)hi << 32) | lo;
}
288
289#endif
290