/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

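/*
 * Copy one QEMU segment cache entry into a VMCB segment descriptor.
 * QEMU keeps descriptor attributes in 'flags' at their native descriptor
 * positions (bits 8..15 and 20..23); the VMCB packs the same bits into a
 * 12-bit 'attrib' field, hence the shifting below.
 */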
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

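/*
 * Inverse of svm_save_seg(): fetch a VMCB segment descriptor and unpack
 * its 'attrib' field back into QEMU's segment-cache flags layout.
 */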
static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

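/* Load a segment from the VMCB and make it the active one, so the
   derived hflags get recomputed by cpu_x86_load_seg_cache(). */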
static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

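    /*
     * aflag is the effective address size of the executing instruction:
     * 2 means 64-bit, in which case the full rAX holds the VMCB physical
     * address; otherwise rAX is truncated to 32 bits.
     */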
    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
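    /*
     * With V_INTR_MASKING the guest runs with its own copy of the
     * interrupt flag: the guest's virtual TPR is tracked in v_tpr, and
     * HF2_HIF_MASK remembers the host's IF while the guest executes.
     */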
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);

        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

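/*
 * VMLOAD loads the additional guest state (FS, GS, TR, LDTR and the
 * syscall/sysenter MSRs) from the VMCB at the physical address in rAX;
 * VMRUN itself does not touch these.
 */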
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

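/*
 * VMSAVE is the mirror of VMLOAD: it stores the same subset of state
 * back into the VMCB at the physical address in rAX.
 */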
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

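/*
 * STGI/CLGI set and clear the global interrupt flag (GIF), which gates
 * delivery of interrupts to the host while a guest runs; QEMU tracks it
 * in hflags2 as HF2_GIF_MASK.
 */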
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to flush */
    tlb_flush_page(CPU(cpu), addr);
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

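            /*
             * The MSR permission map has two bits (read, write) per MSR,
             * in three 2KB chunks covering MSRs 0..0x1fff, 0xc0000000..
             * 0xc0001fff and 0xc0010000..0xc0011fff.  t1 is the byte
             * offset into the map, t0 the bit offset within that byte;
             * param selects the read (0) or write (1) bit.
             */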
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

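        /*
         * The I/O permission map has one bit per port.  The size field
         * in bits 4..6 of 'param' gives the access width in bytes, so
         * 'mask' covers every byte of the access when testing the map.
         */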
        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip is saved */
            x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

/* Note: currently only 32 bits of exit_code are used */
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

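    /*
     * Record whether the guest was in an interrupt shadow (e.g. after
     * MOV SS or STI) so the hypervisor can resume it correctly, then
     * drop the shadow since we are leaving guest mode.
     */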
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));

    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.event_inj)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.event_inj_err)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(cs);
}

#endif