#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

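/* Secure Virtual Machine (AMD SVM) helpers */
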
#if defined(CONFIG_USER_ONLY)

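/*
 * User-mode emulation cannot enter SVM guest mode, so these helpers are
 * no-op stubs in CONFIG_USER_ONLY builds.
 */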
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    assert(0);
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

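/*
 * Store a cached segment register into a vmcb_seg structure at guest
 * physical address @addr, repacking the descriptor flags into the VMCB
 * attribute format.
 */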
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

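/*
 * Read a vmcb_seg structure at guest physical address @addr and expand
 * its attribute bits back into the segment cache flag layout.
 */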
static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

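/* Load a segment from the VMCB and install it into the segment cache. */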
static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

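    /* save the current CPU state into the host save area (hsave page) */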
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

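    /* load the interception bitmaps from the VMCB so they can be checked
       later without re-reading the VMCB */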
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                             offsetof(struct vmcb,
                                                      control.intercept_exceptions));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                       env->vm_vmcb + offsetof(struct vmcb,
                                                               control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = 0;
        if (env->cr[4] & CR4_PAE_MASK) {
            env->nested_pg_mode |= SVM_NPT_PAE;
        }
        if (env->hflags & HF_LMA_MASK) {
            env->nested_pg_mode |= SVM_NPT_LMA;
        }
        if (env->efer & MSR_EFER_NXE) {
            env->nested_pg_mode |= SVM_NPT_NXE;
        }
    }

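    /* enable intercepts: from here on we are running in SVM guest mode */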
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.idtr.limit));

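    /* clear exit_info_2 so we behave like real hardware */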
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

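    /* honour the TLB control field of the VMCB */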
    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
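        /* flush everything; flushing only the given ASID is not implemented */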
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

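    /* if the hypervisor requested a virtual interrupt, make it pending */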
    if (int_ctl & V_IRQ_MASK) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

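    /* event injection: deliver the event queued in the VMCB, if any */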
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);

        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

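/*
 * VMLOAD: load the additional guest state (FS, GS, TR, LDTR and the
 * syscall/sysenter MSRs) from the VMCB addressed by rAX.
 */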
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_eip));
}

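/*
 * VMSAVE: the converse of VMLOAD, storing the same registers back into
 * the VMCB addressed by rAX.
 */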
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
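    /* SKINIT is not implemented; raise #UD */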
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

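    /* flush the page addressed by rAX; the ASID argument is ignored here */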
    tlb_flush_page(CPU(cpu), addr);
}

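/*
 * Check whether the given intercept is enabled for the current guest and,
 * if so, generate a #VMEXIT with the given exit code and exit_info_1.
 */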
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
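            /* look up ECX in the MSR permission bitmap; the bit position
               depends on which of the three architectural MSR ranges it
               falls into */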
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
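        /* consult the I/O permission bitmap for the accessed port */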
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
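            /* record the address of the next instruction in exit_info_2 */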
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

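/*
 * Raise a #VMEXIT: unwind to the guest instruction that triggered it and
 * leave the CPU loop with EXCP_VMEXIT + exit_code; the world switch back
 * to the host state is then performed by do_vmexit().
 */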
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cpu_restore_state(cs, retaddr, true);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT + exit_code;
    env->error_code = exit_info_1;

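    /* remove any pending exception */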
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;

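    /* save the current guest state into the VMCB */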
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

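    /* leave guest mode and reload the host state from the hsave page */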
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));

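    /* EFER is loaded after the control registers so that the derived
       hidden flags are recomputed correctly */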
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

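    /* finally, record the exit reason and any pending event-injection
       information in the guest's VMCB */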
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
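
    /* Note: some architectural #VMEXIT side effects, such as the host
       state consistency checks and the disabling of host breakpoints in
       DR7, are not modelled here. */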
}

#endif