/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "helper-tcg.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)
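/*
 * User-mode emulation has no system state for SVM to switch, so these
 * helpers are reduced to no-op stubs and cpu_vmexit() is unreachable.
 */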

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    assert(0);
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

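/*
 * Store one segment register into the VMCB's vmcb_seg layout.  QEMU keeps
 * the descriptor attribute bits in SegmentCache.flags at their native
 * descriptor positions (type/S/DPL/P in bits 8-15, AVL/L/DB/G in bits
 * 20-23); the VMCB wants them packed into a 12-bit attrib field, hence
 * the shift-and-mask below.
 */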
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

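/*
 * VMRUN: rAX (EAX outside 64-bit mode) holds the physical address of the
 * guest VMCB.  The current host state is saved to the host save area
 * (vm_hsave), then the guest control fields, segment and system state are
 * loaded from the VMCB, and any requested event injection is performed.
 */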
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                             offsetof(struct vmcb,
                                                      control.intercept_exceptions));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));

    env->nested_pg_mode = 0;

    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                       env->vm_vmcb + offsetof(struct vmcb,
                                                               control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        if (env->cr[4] & CR4_PAE_MASK) {
            env->nested_pg_mode |= SVM_NPT_PAE;
        }
        if (env->cr[4] & CR4_PSE_MASK) {
            env->nested_pg_mode |= SVM_NPT_PSE;
        }
        if (env->hflags & HF_LMA_MASK) {
            env->nested_pg_mode |= SVM_NPT_LMA;
        }
        if (env->efer & MSR_EFER_NXE) {
            env->nested_pg_mode |= SVM_NPT_NXE;
        }
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = env_cpu(env);

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);

        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

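/*
 * VMLOAD/VMSAVE transfer the processor state that VMRUN and #VMEXIT do
 * not touch: FS, GS, TR and LDTR (including their hidden state) plus the
 * STAR, SYSENTER and, in long mode, KernelGSBase/LSTAR/CSTAR/SFMASK MSRs.
 */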
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}

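/* STGI and CLGI set and clear the Global Interrupt Flag (hflags2). */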
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}

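/*
 * Check whether an intercept is active for the given exit code and raise
 * #VMEXIT if it is.  For MSR accesses the MSR permission bitmap is
 * consulted: each MSR owns two bits (read, then write) and the map covers
 * the ranges 0-1FFFh, C0000000h-C0001FFFh and C0010000h-C0011FFFh in
 * consecutive 2K-byte blocks.
 */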
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}

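/*
 * I/O intercept check: the I/O permission map holds one bit per port, and
 * an access intercepts if any bit covering [port, port + size) is set.
 * param carries the size in bits 4-6, in the same format as exit_info_1.
 */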
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

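/*
 * Record the exit reason in the VMCB and leave the guest via the main
 * loop; do_vmexit() then performs the actual world switch back to the
 * host state saved in vm_hsave.
 */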
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr, true);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT;
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.exit_info_1), exit_info_1);

    env->old_exception = -1;
    cpu_loop_exit(cs);
}

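/*
 * Complete the #VMEXIT: save the guest state back into the VMCB, forward
 * a pending event injection as exit_int_info, and reload the host state
 * from the host save area.
 */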
void do_vmexit(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);
    uint32_t int_ctl;

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID of current SEGMENT. */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}

#endif
801