/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

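/*
 * Copy a SegmentCache to/from the vmcb_seg layout used in the VMCB.
 * The 16-bit attrib field packs descriptor flag bits 8..15 into bits
 * 0..7 and bits 20..23 (AVL/L/DB/G) into bits 8..11, hence the shifts
 * by 8 and 12 below.
 */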
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

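/*
 * VMRUN: rAX holds the physical address of the guest's VMCB.  The host
 * state is saved to the hsave page, then the guest state and the
 * intercept bitmaps are loaded from the VMCB; execution continues in
 * the guest until an intercepted event forces a #VMEXIT.  aflag selects
 * the effective address size (2 means 64-bit, otherwise rAX is
 * truncated to 32 bits).
 */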
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not complete but not also so wrong */
        tlb_flush(cs, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

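/*
 * VMMCALL has no architectural effect of its own: if the hypervisor
 * does not intercept it, it raises #UD, matching real hardware.
 */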
void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

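/*
 * VMLOAD/VMSAVE handle the state that VMRUN and #VMEXIT do not switch:
 * FS, GS, TR, LDTR and the syscall/sysenter MSRs.  VMLOAD copies them
 * from the VMCB at rAX into the CPU.
 */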
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

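/* VMSAVE mirrors VMLOAD: it stores the same register set from the CPU
   back into the VMCB at rAX. */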
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}

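/*
 * STGI/CLGI set and clear the Global Interrupt Flag, kept in hflags2 as
 * HF2_GIF_MASK.  While GIF is clear, interrupts and most other external
 * events are held pending.
 */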
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

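/*
 * INVLPGA invalidates the TLB mapping of the virtual address in rAX for
 * the ASID in ECX.  QEMU does not model ASIDs, so only the page for the
 * current CPU is flushed.
 */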
void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}

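/*
 * Check whether the given operation is intercepted by the active VMCB
 * and, if so, leave the guest with a #VMEXIT.  CR/DR accesses and
 * exceptions are tracked in the per-class bitmaps loaded at VMRUN time;
 * MSR accesses additionally consult the MSR permission bitmap.
 */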
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

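            /*
             * The MSR permission bitmap uses two bits per MSR (read and
             * write) in three 2K regions: MSRs 0..0x1fff, then
             * 0xc0000000..0xc0001fff, then 0xc0010000..0xc0011fff.
             * Below, t1 is the byte offset into the bitmap and t0 the
             * bit offset within that byte; param is 0 for a read and 1
             * for a write access.
             */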
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

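/*
 * I/O accesses consult the I/O permission bitmap (one bit per port)
 * referenced by the VMCB.  param carries the IOIO exit-information
 * bits; the port number is merged into bits 16..31 of exit_info_1.
 */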
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

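/*
 * #VMEXIT: store the guest state and the exit code/exit information in
 * the VMCB, reload the host state that VMRUN saved in the hsave page,
 * and return to the host through cpu_loop_exit().
 */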
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID of current translation block */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(cs);
}

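/*
 * Out-of-line entry point so that code outside the TCG helpers (e.g.
 * the exception delivery path) can raise a #VMEXIT.
 */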
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif