1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include "qemu/osdep.h"
21#include "qemu-common.h"
22#include "qemu.h"
23#include "cpu_loop-common.h"
24
25
26
27
28uint64_t cpu_get_tsc(CPUX86State *env)
29{
30 return cpu_get_host_ticks();
31}
32
/*
 * Encode one 8-byte segment descriptor at @ptr from a base address,
 * a limit and pre-shifted access @flags, storing both 32-bit halves
 * in target byte order.
 */
static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
                     int flags)
{
    uint32_t *dest = ptr;
    unsigned int lo = (addr << 16) | (limit & 0xffff);
    unsigned int hi = ((addr >> 16) & 0xff)
                    | (addr & 0xff000000)
                    | (limit & 0x000f0000)
                    | flags;

    dest[0] = tswap32(lo);
    dest[1] = tswap32(hi);
}
45
46static uint64_t *idt_table;
47#ifdef TARGET_X86_64
/*
 * Encode a 16-byte long-mode IDT gate at @ptr: handler offset @addr,
 * selector @sel, gate @type and privilege level @dpl (P bit set).
 */
static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
                       uint64_t addr, unsigned int sel)
{
    uint32_t *dest = ptr;
    uint32_t lo = (addr & 0xffff) | (sel << 16);
    uint32_t hi = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);

    dest[0] = tswap32(lo);
    dest[1] = tswap32(hi);
    dest[2] = tswap32(addr >> 32); /* upper half of the handler offset */
    dest[3] = 0;                   /* reserved */
}
60
61static void set_idt(int n, unsigned int dpl)
62{
63 set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
64}
65#else
/*
 * Encode an 8-byte protected-mode IDT gate at @ptr: handler offset
 * @addr, selector @sel, gate @type and privilege level @dpl (P bit set).
 */
static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
                     uint32_t addr, unsigned int sel)
{
    uint32_t *dest = ptr;
    uint32_t lo = (addr & 0xffff) | (sel << 16);
    uint32_t hi = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);

    dest[0] = tswap32(lo);
    dest[1] = tswap32(hi);
}
76
77
78static void set_idt(int n, unsigned int dpl)
79{
80 set_gate(idt_table + n, 0, dpl, 0, 0);
81}
82#endif
83
/*
 * Main user-mode execution loop: run translated guest code until an
 * exception or trap is raised, emulate the kernel's response (syscall
 * dispatch or signal delivery via queue_signal()), then resume.
 * Never returns except via exit paths inside do_syscall/signals.
 */
void cpu_loop(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);
    int trapnr;
    abi_ulong pc;
    abi_ulong ret;
    target_siginfo_t info;

    for(;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch(trapnr) {
        case 0x80:
            /* linux syscall entered via "int $0x80" (i386 ABI:
               args in ebx, ecx, edx, esi, edi, ebp) */
            ret = do_syscall(env,
                             env->regs[R_EAX],
                             env->regs[R_EBX],
                             env->regs[R_ECX],
                             env->regs[R_EDX],
                             env->regs[R_ESI],
                             env->regs[R_EDI],
                             env->regs[R_EBP],
                             0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                /* restart: back eip up over the 2-byte int $0x80 insn */
                env->eip -= 2;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                /* sigreturn restored EAX itself; don't clobber it */
                env->regs[R_EAX] = ret;
            }
            break;
#ifndef TARGET_ABI32
        case EXCP_SYSCALL:
            /* linux syscall entered via "syscall" (x86-64 ABI:
               args in rdi, rsi, rdx, r10, r8, r9) */
            ret = do_syscall(env,
                             env->regs[R_EAX],
                             env->regs[R_EDI],
                             env->regs[R_ESI],
                             env->regs[R_EDX],
                             env->regs[10],
                             env->regs[8],
                             env->regs[9],
                             0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                /* restart: back eip up over the 2-byte syscall insn */
                env->eip -= 2;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[R_EAX] = ret;
            }
            break;
#endif
        case EXCP0B_NOSEG:
        case EXCP0C_STACK:
            /* segment-not-present / stack fault -> SIGBUS */
            info.si_signo = TARGET_SIGBUS;
            info.si_errno = 0;
            info.si_code = TARGET_SI_KERNEL;
            info._sifields._sigfault._addr = 0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP0D_GPF:
            /* XXX: potential problem if ABI32 */
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                /* vm86 mode: let the vm86 emulation handle the fault */
                handle_vm86_fault(env);
            } else
#endif
            {
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            }
            break;
        case EXCP0E_PAGE:
            /* page fault: MAPERR if page absent, ACCERR if protection */
            info.si_signo = TARGET_SIGSEGV;
            info.si_errno = 0;
            if (!(env->error_code & 1))
                info.si_code = TARGET_SEGV_MAPERR;
            else
                info.si_code = TARGET_SEGV_ACCERR;
            info._sifields._sigfault._addr = env->cr[2];
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP00_DIVZ:
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            } else
#endif
            {
                /* division by zero */
                info.si_signo = TARGET_SIGFPE;
                info.si_errno = 0;
                info.si_code = TARGET_FPE_INTDIV;
                info._sifields._sigfault._addr = env->eip;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            }
            break;
        case EXCP01_DB:
        case EXCP03_INT3:
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            } else
#endif
            {
                /* debug trap or breakpoint -> SIGTRAP */
                info.si_signo = TARGET_SIGTRAP;
                info.si_errno = 0;
                if (trapnr == EXCP01_DB) {
                    info.si_code = TARGET_TRAP_BRKPT;
                    info._sifields._sigfault._addr = env->eip;
                } else {
                    info.si_code = TARGET_SI_KERNEL;
                    info._sifields._sigfault._addr = 0;
                }
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            }
            break;
        case EXCP04_INTO:
        case EXCP05_BOUND:
#ifndef TARGET_X86_64
            if (env->eflags & VM_MASK) {
                handle_vm86_trap(env, trapnr);
            } else
#endif
            {
                /* INTO overflow / BOUND range exceeded -> SIGSEGV */
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SI_KERNEL;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            }
            break;
        case EXCP06_ILLOP:
            /* invalid opcode -> SIGILL at the faulting instruction */
            info.si_signo = TARGET_SIGILL;
            info.si_errno = 0;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->eip;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_DEBUG:
            /* gdbstub single-step/breakpoint -> SIGTRAP */
            info.si_signo = TARGET_SIGTRAP;
            info.si_errno = 0;
            info.si_code = TARGET_TRAP_BRKPT;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_ATOMIC:
            /* re-execute the atomic op exclusively, then continue */
            cpu_exec_step_atomic(cs);
            break;
        default:
            pc = env->segs[R_CS].base + env->eip;
            EXCP_DUMP(env, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
                      (long)pc, trapnr);
            abort();
        }
        process_pending_signals(env);
    }
}
246
247void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
248{
249 env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
250 env->hflags |= HF_PE_MASK | HF_CPL_MASK;
251 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
252 env->cr[4] |= CR4_OSFXSR_MASK;
253 env->hflags |= HF_OSFXSR_MASK;
254 }
255#ifndef TARGET_ABI32
256
257 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
258 fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
259 exit(EXIT_FAILURE);
260 }
261 env->cr[4] |= CR4_PAE_MASK;
262 env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
263 env->hflags |= HF_LMA_MASK;
264#endif
265
266
267 env->eflags |= IF_MASK;
268
269
270#ifndef TARGET_ABI32
271 env->regs[R_EAX] = regs->rax;
272 env->regs[R_EBX] = regs->rbx;
273 env->regs[R_ECX] = regs->rcx;
274 env->regs[R_EDX] = regs->rdx;
275 env->regs[R_ESI] = regs->rsi;
276 env->regs[R_EDI] = regs->rdi;
277 env->regs[R_EBP] = regs->rbp;
278 env->regs[R_ESP] = regs->rsp;
279 env->eip = regs->rip;
280#else
281 env->regs[R_EAX] = regs->eax;
282 env->regs[R_EBX] = regs->ebx;
283 env->regs[R_ECX] = regs->ecx;
284 env->regs[R_EDX] = regs->edx;
285 env->regs[R_ESI] = regs->esi;
286 env->regs[R_EDI] = regs->edi;
287 env->regs[R_EBP] = regs->ebp;
288 env->regs[R_ESP] = regs->esp;
289 env->eip = regs->eip;
290#endif
291
292
293#ifndef TARGET_ABI32
294 env->idt.limit = 511;
295#else
296 env->idt.limit = 255;
297#endif
298 env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
299 PROT_READ|PROT_WRITE,
300 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
301 idt_table = g2h(env->idt.base);
302 set_idt(0, 0);
303 set_idt(1, 0);
304 set_idt(2, 0);
305 set_idt(3, 3);
306 set_idt(4, 3);
307 set_idt(5, 0);
308 set_idt(6, 0);
309 set_idt(7, 0);
310 set_idt(8, 0);
311 set_idt(9, 0);
312 set_idt(10, 0);
313 set_idt(11, 0);
314 set_idt(12, 0);
315 set_idt(13, 0);
316 set_idt(14, 0);
317 set_idt(15, 0);
318 set_idt(16, 0);
319 set_idt(17, 0);
320 set_idt(18, 0);
321 set_idt(19, 0);
322 set_idt(0x80, 3);
323
324
325 {
326 uint64_t *gdt_table;
327 env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
328 PROT_READ|PROT_WRITE,
329 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
330 env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
331 gdt_table = g2h(env->gdt.base);
332#ifdef TARGET_ABI32
333 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
334 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
335 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
336#else
337
338 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
339 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
340 DESC_L_MASK |
341 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
342#endif
343 write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
344 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
345 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
346 }
347 cpu_x86_load_seg(env, R_CS, __USER_CS);
348 cpu_x86_load_seg(env, R_SS, __USER_DS);
349#ifdef TARGET_ABI32
350 cpu_x86_load_seg(env, R_DS, __USER_DS);
351 cpu_x86_load_seg(env, R_ES, __USER_DS);
352 cpu_x86_load_seg(env, R_FS, __USER_DS);
353 cpu_x86_load_seg(env, R_GS, __USER_DS);
354
355 env->segs[R_FS].selector = 0;
356#else
357 cpu_x86_load_seg(env, R_DS, 0);
358 cpu_x86_load_seg(env, R_ES, 0);
359 cpu_x86_load_seg(env, R_FS, 0);
360 cpu_x86_load_seg(env, R_GS, 0);
361#endif
362}
363