/*
 * x86 SVM (Secure Virtual Machine) helpers
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    assert(0);
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

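    /* Save the current CPU state into the host save area (hsave). */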
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

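    /*
     * Load the interception bitmaps so we do not need to access the
     * VMCB again while running in SVM mode.
     */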
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                             offsetof(struct vmcb,
                                                      control.intercept_exceptions
                                                      ));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));

    env->nested_pg_mode = 0;

    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                       env->vm_vmcb + offsetof(struct vmcb,
                                                               control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        if (env->cr[4] & CR4_PAE_MASK) {
            env->nested_pg_mode |= SVM_NPT_PAE;
        }
        if (env->cr[4] & CR4_PSE_MASK) {
            env->nested_pg_mode |= SVM_NPT_PSE;
        }
        if (env->hflags & HF_LMA_MASK) {
            env->nested_pg_mode |= SVM_NPT_LMA;
        }
        if (env->efer & MSR_EFER_NXE) {
            env->nested_pg_mode |= SVM_NPT_NXE;
        }
    }

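    /* Enable intercepts: from here on we are executing the guest. */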
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.idtr.limit));

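    /* Clear exit_info_2 so we behave like real hardware. */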
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

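    /* Honour the VMCB's TLB control field. */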
    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
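        /* Flush the entire TLB; flushing by ASID is not modelled. */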
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = env_cpu(env);

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

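    /* Inject an event from the VMCB, if one is pending. */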
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);

        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
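            /* Deliver the injected interrupt immediately. */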
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
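    /* SKINIT is not implemented; raise an undefined-opcode exception. */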
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

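    /* The ASID is ignored; simply flush the page from the TLB. */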
    tlb_flush_page(CPU(cpu), addr);
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
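            /* Consult the MSR permission bitmap referenced by the VMCB. */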
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
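        /* Consult the I/O permission bitmap referenced by the VMCB. */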
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
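            /* Record the next instruction's address in exit_info_2. */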
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr, true);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT + exit_code;
    env->error_code = exit_info_1;

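    /* Remove any pending exception. */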
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = env_cpu(env);
    uint32_t int_ctl;

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;

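    /* Save the guest state into the VMCB. */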
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

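    /* Reload the host state from the hsave area. */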
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
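    /*
     * EFER must be loaded after the control registers so that the
     * hidden flags are recomputed correctly.
     */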
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

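    /* Record the exit information in the VMCB. */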
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
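
    /*
     * Further architectural #VMEXIT actions, such as re-checking the
     * reloaded host state for consistency, are not modelled here.
     */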
}

#endif