/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_NESTED_H
#define __KVM_X86_VMX_NESTED_H

#include "kvm_cache_regs.h"
#include "vmcs12.h"
#include "vmx.h"
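/*
 * Status returned by nested_vmx_enter_non_root_mode():
 */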
enum nvmx_vmentry_status {
	NVMX_VMENTRY_SUCCESS,		/* Entered VMX non-root mode */
	NVMX_VMENTRY_VMFAIL,		/* Consistency check VMFail */
	NVMX_VMENTRY_VMEXIT,		/* Consistency check VMExit */
	NVMX_VMENTRY_KVM_INTERNAL_ERROR,/* KVM internal error */
};

void vmx_leave_nested(struct kvm_vcpu *vcpu);
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
void nested_vmx_hardware_unsetup(void);
__init int nested_vmx_hardware_setup(struct kvm_x86_ops *ops,
				     int (*exit_handlers[])(struct kvm_vcpu *));
void nested_vmx_set_vmcs_shadowing_bitmap(void);
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
							bool from_vmentry);
bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason);
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
		       u32 exit_intr_info, unsigned long exit_qualification);
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu);
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
			u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu);
void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu);
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
				 int size);

static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_vmcs12;
}

static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
}

static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

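	/*
	 * In case we do two consecutive get/set_nested_state()s while L2 was
	 * running hv_evmcs may end up not being mapped (we map eVMCS lazily on
	 * VMLAUNCH/VMRESUME instruction, but our shadow VMCS pointer should
	 * still be valid (i.e. a VMPTRLD has been executed) though it is
	 * not mapped.
	 */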
	return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
		vmx->nested.hv_evmcs;
}

static inline unsigned long nested_ept_get_eptp(struct kvm_vcpu *vcpu)
{
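	/* return the page table to be shadowed - in our case, EPT12 */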
	return get_vmcs12(vcpu)->ept_pointer;
}

static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
{
	return nested_ept_get_eptp(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
}

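/*
 * Reflect a VM Exit into L1.
 */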
static inline int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu,
					    u32 exit_reason)
{
	u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

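	/*
	 * At this point, the exit interruption info in exit_intr_info
	 * is only valid for EXCEPTION_NMI exits.  For EXTERNAL_INTERRUPT
	 * we need to query the in-kernel LAPIC.
	 */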
	WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT);
	if ((exit_intr_info &
	     (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
	    (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

		vmcs12->vm_exit_intr_error_code =
			vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	}

	nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info,
			  vmcs_readl(EXIT_QUALIFICATION));
	return 1;
}

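/*
 * Return the cr0 value that a nested guest would read. This is a combination
 * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
 * its hypervisor (cr0_read_shadow).
 */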
static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
{
	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}
static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
{
	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
}

static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
{
	return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
}

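/*
 * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
 * to modify any valid field of the VMCS, or are the VM-exit
 * information fields read-only?
 */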
static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low &
		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
}

static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
}

static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
		CPU_BASED_MONITOR_TRAP_FLAG;
}

static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
		SECONDARY_EXEC_SHADOW_VMCS;
}

static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}

static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}

static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control &
		PIN_BASED_VMX_PREEMPTION_TIMER;
}

static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
}

static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}

static inline int nested_cpu_has_mtf(struct vmcs12 *vmcs12)
{
	return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
}

static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
}

static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
}

static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
}

static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
}

static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
}

static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
}

static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
}

static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
}

static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
}

static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
{
	return nested_cpu_has_vmfunc(vmcs12) &&
		(vmcs12->vm_function_control &
		 VMX_VMFUNC_EPTP_SWITCHING);
}

static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
}

static inline bool nested_cpu_has_save_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->vm_exit_controls &
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
}

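/*
 * In nested virtualization, check if L1 asked to exit on external interrupts.
 * For most existing hypervisors, this will always return true.
 */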
static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
		PIN_BASED_EXT_INTR_MASK;
}

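/*
 * if fixed0[i] == 1: val[i] must be 1
 * if fixed1[i] == 0: val[i] must be 0
 */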
static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
{
	return ((val & fixed1) | fixed0) == val;
}

static inline bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
	    SECONDARY_EXEC_UNRESTRICTED_GUEST &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
		fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);

	return fixed_bits_valid(val, fixed0, fixed1);
}

static inline bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1);
}

static inline bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1);
}

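/* No difference in the restrictions on guest and host CR4 in VMX operation. */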
#define nested_guest_cr4_valid	nested_cr4_valid
#define nested_host_cr4_valid	nested_cr4_valid

#endif /* __KVM_X86_VMX_NESTED_H */