/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "ops.h"
#include "vmcs.h"

extern const u32 vmx_msr_index[];
extern u64 host_efer;

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

#define NR_AUTOLOAD_MSRS 8

struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[NR_AUTOLOAD_MSRS];
};

struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

/* Posted-Interrupt Descriptor */
struct pi_desc {
	u32 pir[8];     /* Posted interrupt requested */
	union {
		struct {
				/* bit 256 - Outstanding Notification */
			u16	on	: 1,
				/* bit 257 - Suppress Notification */
				sn	: 1,
				/* bit 271:258 - Reserved */
				rsvd_1	: 14;
				/* bit 279:272 - Notification Vector */
			u8	nv;
				/* bit 287:280 - Reserved */
			u8	rsvd_2;
				/* bit 319:288 - Notification Destination */
			u32	ndst;
		};
		u64 control;
	};
	u32 rsvd[6];
} __aligned(64);
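
/*
 * Illustrative sketch (not part of the original header): the anonymous
 * union overlays the notification fields (on, sn, nv, ndst) with a single
 * 64-bit "control" word, which is what the atomic bitops later in this
 * file operate on.  This hypothetical helper assumes a descriptor that is
 * not concurrently accessed by hardware.
 */
static inline void pi_desc_init_sketch(struct pi_desc *pi, u8 nv, u32 ndst)
{
	memset(pi, 0, sizeof(*pi));	/* clear PIR bits and control word */
	pi->nv = nv;			/* notification vector */
	pi->ndst = ndst;		/* notification destination */
}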

#define RTIT_ADDR_RANGE		4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 addr_range;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates lazily loaded guest state has not yet been decached
	 * from vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS.  Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	u8                    fail;
	u8		      msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values.  If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool		      guest_state_loaded;

	u32                   exit_intr_info;
	u32                   idt_vectoring_info;
	ulong                 rflags;

	struct shared_msr_entry *guest_msrs;
	int                   nmsrs;
	int                   save_nmsrs;
	bool                  guest_msrs_ready;
#ifdef CONFIG_X86_64
	u64		      msr_host_kernel_gs_base;
	u64		      msr_guest_kernel_gs_base;
#endif

	u64		      spec_ctrl;

	u32 secondary_exec_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs    vmcs01;
	struct loaded_vmcs   *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	u32 exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM		512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	u64 current_tsc_ratio;

	u32 host_pkru;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	u64 ept_pointer;

	struct pt_desc pt_desc;
};
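
/*
 * Illustrative sketch (not part of the original header): msr_autoload.guest
 * and msr_autoload.host back the VMCS VM-entry/VM-exit MSR-load lists.  A
 * hypothetical helper appending a guest-side entry could look like this;
 * the real logic (paired host entries, VMCS count updates, de-duplication)
 * lives in vmx.c.
 */
static inline bool vmx_autoload_add_guest_sketch(struct vcpu_vmx *vmx,
						 u32 msr, u64 value)
{
	struct vmx_msrs *m = &vmx->msr_autoload.guest;

	if (m->nr >= NR_AUTOLOAD_MSRS)	/* fixed-size array, may fill up */
		return false;
	m->val[m->nr].index = msr;
	m->val[m->nr].value = value;
	m->nr++;
	return true;
}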

enum ept_pointers_status {
	EPT_POINTERS_CHECK = 0,
	EPT_POINTERS_MATCH = 1,
	EPT_POINTERS_MISMATCH = 2
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	enum ept_pointers_status ept_pointers_match;
	spinlock_t ept_pointer_lock;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
void update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);

#define POSTED_INTR_ON  0
#define POSTED_INTR_SN  1

static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
	return test_and_set_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
	return test_and_clear_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}

static inline void pi_set_sn(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_SN,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_set_on(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_clear_on(struct pi_desc *pi_desc)
{
	clear_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline int pi_test_on(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_sn(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_SN,
			(unsigned long *)&pi_desc->control);
}
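
/*
 * Illustrative sketch (not part of the original header): a minimal
 * posted-interrupt delivery sequence using the helpers above.  The real
 * path in vmx.c additionally sends the notification IPI and handles the
 * case where the vCPU is not running.
 */
static inline bool pi_post_vector_sketch(struct pi_desc *pi, int vector)
{
	if (pi_test_and_set_pir(vector, pi))
		return false;		/* vector was already pending */
	/* Returns true if ON was newly raised and an IPI should be sent. */
	return !pi_test_and_set_on(pi);
}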

static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define BUILD_CONTROLS_SHADOW(lname, uname)				    \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)	    \
{									    \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		    \
		vmcs_write32(uname, val);				    \
		vmx->loaded_vmcs->controls_shadow.lname = val;		    \
	}								    \
}									    \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)		    \
{									    \
	return vmx->loaded_vmcs->controls_shadow.lname;			    \
}									    \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)  \
{									    \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	    \
}									    \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{									    \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	    \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
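
/*
 * Usage sketch (not part of the original header): each instantiation above
 * generates a set/get/setbit/clearbit family that only touches the VMCS
 * when the cached shadow value actually changes, e.g.:
 *
 *	pin_controls_setbit(vmx, PIN_BASED_EXT_INTR_MASK);
 *	if (exec_controls_get(vmx) & CPU_BASED_TPR_SHADOW)
 *		exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
 *
 * PIN_BASED_EXT_INTR_MASK and CPU_BASED_TPR_SHADOW are existing control
 * bits from asm/vmx.h, used here purely for illustration.
 */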

static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}
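
/*
 * Illustrative sketch (not part of the original header): the segment cache
 * keeps one valid bit per (segment, field) pair in "bitmask", with
 * SEG_FIELD_NR fields per segment.  A hypothetical test-and-set helper in
 * the style of the one in vmx.c (which additionally consults regs_avail):
 */
static inline bool vmx_segment_cache_test_set_sketch(struct vcpu_vmx *vmx,
						     unsigned int seg,
						     unsigned int field)
{
	u32 mask = 1 << (seg * SEG_FIELD_NR + field);
	bool was_cached = vmx->segment_cache.bitmask & mask;

	vmx->segment_cache.bitmask |= mask;
	return was_cached;
}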

static inline u32 vmx_vmentry_ctrl(void)
{
	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;

	if (pt_mode == PT_MODE_SYSTEM)
		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
				  VM_ENTRY_LOAD_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmentry_ctrl &
		~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}

static inline u32 vmx_vmexit_ctrl(void)
{
	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;

	if (pt_mode == PT_MODE_SYSTEM)
		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
				 VM_EXIT_CLEAR_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmexit_ctrl &
		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}

u32 vmx_exec_control(struct vcpu_vmx *vmx);
u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx);

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
	return &(to_vmx(vcpu)->pi_desc);
}
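
/*
 * Usage sketch (not part of the original header): the generic structs are
 * embedded inside their VMX-specific wrappers, so container_of() recovers
 * the wrapper from the generic pointer KVM passes around, e.g.:
 *
 *	struct vcpu_vmx *vmx = to_vmx(vcpu);
 *	struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
 */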

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}
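
/*
 * Usage sketch (not part of the original header): a VMCS obtained from
 * alloc_vmcs() must be released with free_vmcs(); loaded_vmcs instances
 * pair alloc_loaded_vmcs() with free_loaded_vmcs().  For example, to
 * allocate a regular (non-shadow) VMCS on the current CPU:
 *
 *	struct vmcs *vmcs = alloc_vmcs(false);
 *
 *	if (!vmcs)
 *		return -ENOMEM;
 *	...
 *	free_vmcs(vmcs);
 */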

static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
				bool invalidate_gpa)
{
	if (enable_ept && (invalidate_gpa || !enable_vpid)) {
		if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
			return;
		ept_sync_context(construct_eptp(vcpu,
						vcpu->arch.mmu->root_hpa));
	} else {
		vpid_sync_context(vpid);
	}
}

static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
{
	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
}

static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

void dump_vmcs(void);

#endif /* __KVM_X86_VMX_H */