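/*
 * x86 SVM helpers
 */
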
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"

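/* Secure Virtual Machine helpers */
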
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

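/*
 * VMRUN and VMLOAD canonicalize (i.e. sign-extend to bit 63) all base
 * addresses in the segment registers that have been loaded.
 */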
static inline void svm_canonicalization(CPUX86State *env,
                                        target_ulong *seg_base)
{
    uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);

    /* Shift on int64_t, not long: long is only 32 bits on LLP64 hosts. */
    *seg_base = ((((int64_t) *seg_base) << shift_amt) >> shift_amt);
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
    svm_canonicalization(env, &sc->base);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

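/*
 * VMRUN consistency checks on EFER and related control state; any failure
 * makes the caller raise SVM_EXIT_ERR.
 */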
static inline bool is_efer_invalid_state(CPUX86State *env)
{
    if (!(env->efer & MSR_EFER_SVME)) {
        return true;
    }

    if (env->efer & MSR_EFER_RESERVED) {
        return true;
    }

    if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
        !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
        && !(env->cr[4] & CR4_PAE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
        && !(env->cr[0] & CR0_PE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
        && (env->cr[4] & CR4_PAE_MASK)
        && (env->segs[R_CS].flags & DESC_L_MASK)
        && (env->segs[R_CS].flags & DESC_B_MASK)) {
        return true;
    }

    return false;
}

static inline bool virtual_gif_enabled(CPUX86State *env)
{
    if (likely(env->hflags & HF_GUEST_MASK)) {
        return (env->features[FEAT_SVM] & CPUID_SVM_VGIF)
               && (env->int_ctl & V_GIF_ENABLED_MASK);
    }
    return false;
}

static inline bool virtual_vm_load_save_enabled(CPUX86State *env,
                                                uint32_t exit_code,
                                                uintptr_t retaddr)
{
    uint64_t lbr_ctl;

    if (likely(env->hflags & HF_GUEST_MASK)) {
        if (likely(!(env->hflags2 & HF2_NPT_MASK)) ||
            !(env->efer & MSR_EFER_LMA)) {
            cpu_vmexit(env, exit_code, 0, retaddr);
        }

        lbr_ctl = x86_ldl_phys(env_cpu(env),
                               env->vm_vmcb + offsetof(struct vmcb,
                                                       control.lbr_ctl));
        return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD)
               && (lbr_ctl & V_VMLOAD_VMSAVE_ENABLED_MASK);
    }

    return false;
}

static inline bool virtual_gif_set(CPUX86State *env)
{
    return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK);
}

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t asid;
    uint64_t new_cr0;
    uint64_t new_cr3;
    uint64_t new_cr4;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    /* aflag == 2 means 64-bit address size; otherwise rAX is
       zero-extended from 32 bits. */
    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

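    /* save the current CPU state in the hsave page */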
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);

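    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */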
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                             offsetof(struct vmcb,
                                                      control.intercept_exceptions));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
    asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                    control.asid));

    uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.msrpm_base_pa));
    uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.iopm_base_pa));

    /* The MSR and I/O permission maps must fit within the physical
       address space; otherwise VMRUN fails with SVM_EXIT_ERR. */
    if ((msrpm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_MSRPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if ((iopm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_IOPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    env->nested_pg_mode = 0;

    if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (asid == 0) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                       env->vm_vmcb + offsetof(struct vmcb,
                                                               control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;
    }

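    /* enable intercepts */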
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
    if (new_cr0 & SVM_CR0_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3));
    if ((env->efer & MSR_EFER_LMA) &&
        (new_cr3 & ((~0ULL) << cpu->phys_bits))) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
    if (new_cr4 & cr4_reserved_bits(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
    cpu_x86_update_cr0(env, new_cr0);
    cpu_x86_update_cr4(env, new_cr4);
    cpu_x86_update_cr3(env, new_cr3);
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    env->int_ctl = x86_ldl_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb,
                                                        control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (env->int_ctl & V_INTR_MASKING_MASK) {
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb,
                                                       save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);
    svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.idtr),
                 &env->idt);
    svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.gdtr),
                 &env->gdt);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb,
                                                            save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb,
                                                            save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

#ifdef TARGET_X86_64
    if (env->dr[6] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (env->dr[7] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
#endif

    if (is_efer_invalid_state(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb,
                                                  control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (ctl_has_irq(env)) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    if (virtual_gif_set(env)) {
        env->hflags2 |= HF2_VGIF_MASK;
    }

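    /* inject the event stored in control.event_inj, if it is marked valid */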
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);

        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            if (vector == EXCP02_NMI || vector >= 31) {
                cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            }
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        default:
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;
    int prot;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
        addr = get_hphys(cs, addr, MMU_DATA_LOAD, &prot);
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
    svm_canonicalization(env, &env->kernelgsbase);
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb,
                                                    save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;
    int prot;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) {
        addr = get_hphys(cs, addr, MMU_DATA_STORE, &prot);
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb,
                                                     save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs),
                 env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}

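/*
 * With virtual GIF enabled, STGI and CLGI toggle the guest's V_GIF bit in
 * int_ctl instead of the real global interrupt flag.
 */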
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl |= V_GIF_MASK;
        env->hflags2 |= HF2_VGIF_MASK;
    } else {
        env->hflags2 |= HF2_GIF_MASK;
    }
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl &= ~V_GIF_MASK;
        env->hflags2 &= ~HF2_VGIF_MASK;
    } else {
        env->hflags2 &= ~HF2_GIF_MASK;
    }
}

bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
{
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            return true;
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            return true;
        }
        break;
    }
    return false;
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }

    if (!cpu_svm_has_intercept(env, type)) {
        return;
    }

    if (type == SVM_EXIT_MSR) {
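        /*
         * Consult the MSR permission map: two bits per MSR, one for
         * reads (param == 0) and one for writes (param == 1).
         */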
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
        uint32_t t0, t1;

        switch ((uint32_t)env->regs[R_ECX]) {
        case 0 ... 0x1fff:
            t0 = (env->regs[R_ECX] * 2) % 8;
            t1 = (env->regs[R_ECX] * 2) / 8;
            break;
        case 0xc0000000 ... 0xc0001fff:
            t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        case 0xc0010000 ... 0xc0011fff:
            t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        default:
            cpu_vmexit(env, type, param, retaddr);
            t0 = 0;
            t1 = 0;
            break;
        }
        if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
            cpu_vmexit(env, type, param, retaddr);
        }
        return;
    }

    cpu_vmexit(env, type, param, retaddr);
}

void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
{
    cpu_svm_check_intercept_param(env, type, 0, GETPC());
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
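        /* test the I/O permission map: one bit per port, with the access
           width encoded in bits 4-6 of param */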
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
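            /* exit_info_2 holds the rIP of the instruction after the I/O op */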
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb,
                                                 control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr, true);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT;
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.exit_info_1),
                 exit_info_1);

    env->old_exception = -1;
    cpu_loop_exit(cs);
}

void do_vmexit(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;

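    /* Save the VM state in the vmcb */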
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                 env->int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

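    /* Reload the host state from vm_hsave */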
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->int_ctl = 0;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));

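    /* we need to set the efer after the crs so the hidden flags get
       set properly */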
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

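    /* record the injected event in exit_int_info and clear event_inj */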
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb,
                                         control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    env->hflags2 &= ~HF2_VGIF_MASK;
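
    /*
     * Note: real hardware performs further host-state consistency checks
     * at #VMEXIT (canonical rIP, valid control registers, PDPE reloads in
     * PAE mode) and shuts down on failure; they are not modeled here.
     */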
}