/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "../kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "../cpuid.h"
#include "run_flags.h"

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

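/*
 * An x2APIC MSR index is the MMIO register offset scaled down by 16, based
 * at APIC_BASE_MSR (0x800), e.g. the TPR at offset 0x80 is MSR 0x808.
 */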
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS	7
#else
#define MAX_NR_USER_RETURN_MSRS	4
#endif

#define MAX_NR_LOADSTORE_MSRS	8

struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[MAX_NR_LOADSTORE_MSRS];
};

struct vmx_uret_msr {
	bool load_into_hardware;
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

#define RTIT_ADDR_RANGE		4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 num_address_ranges;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

union vmx_exit_reason {
	struct {
		u32	basic			: 16;
		u32	reserved16		: 1;
		u32	reserved17		: 1;
		u32	reserved18		: 1;
		u32	reserved19		: 1;
		u32	reserved20		: 1;
		u32	reserved21		: 1;
		u32	reserved22		: 1;
		u32	reserved23		: 1;
		u32	reserved24		: 1;
		u32	reserved25		: 1;
		u32	bus_lock_detected	: 1;
		u32	enclave_mode		: 1;
		u32	smi_pending_mtf		: 1;
		u32	smi_from_vmx_root	: 1;
		u32	reserved30		: 1;
		u32	failed_vmentry		: 1;
	};
	u32 full;
};

#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc)
#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records)

bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu);
bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);

struct lbr_desc {
	/* Basic info about guest LBR records. */
	struct x86_pmu_lbr records;

	/*
	 * Emulate the LBR feature via pass-through LBR registers when the
	 * per-vcpu guest LBR event is scheduled on the current pcpu.
	 *
	 * The records may be inaccurate if the host reclaims the LBR.
	 */
	struct perf_event *event;

	/* True if LBRs are marked as not intercepted in the MSR bitmap */
	bool msr_passthrough;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level 1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
	 */
	struct gfn_to_hva_cache shadow_vmcs12_cache;

	/*
	 * GPA to HVA cache for VMCS12
	 */
	struct gfn_to_hva_cache vmcs12_cache;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates whether the MSR bitmap for L2 needs to be rebuilt due to
	 * changes in the MSR bitmap for L1 or switching to a different L2.
	 * This flag can only be used reliably in conjunction with a paravirt
	 * L1 which informs L0 whether any changes to the MSR bitmap for L2
	 * were done on its side.
	 */
	bool force_msr_bitmap_recalc;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS.  Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;
	bool update_vmcs01_cpu_dirty_logging;
	bool update_vmcs01_apicv_status;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	u8                    fail;
	u8		      x2apic_msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values.  If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool		      guest_state_loaded;

	unsigned long         exit_qualification;
	u32                   exit_intr_info;
	u32                   idt_vectoring_info;
	ulong                 rflags;

	/*
	 * User return MSRs are always emulated when enabled in the guest, but
	 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
	 * of 64-bit mode or if EFER.SCE=0, thus the SYSCALL MSRs don't need to
	 * be loaded into hardware if those conditions aren't met.
	 */
	struct vmx_uret_msr   guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	bool                  guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
	u64		      msr_host_kernel_gs_base;
	u64		      msr_guest_kernel_gs_base;
#endif

	u64		      spec_ctrl;
	u32		      msr_ia32_umwait_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs    vmcs01;
	struct loaded_vmcs   *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	union vmx_exit_reason exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Used if this vCPU is waiting for PI notification wakeup. */
	struct list_head pi_wakeup_list;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM		512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set
	 * in msr_ia32_feature_control.  FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	/* SGX Launch Control public key hash */
	u64 msr_ia32_sgxlepubkeyhash[4];
	u64 msr_ia32_mcu_opt_ctrl;
	bool disable_fb_clear;

	struct pt_desc pt_desc;
	struct lbr_desc lbr_desc;

	/* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS	15
	struct {
		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	} shadow_msr_intercept;
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
		    unsigned int flags);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);

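/*
 * Intercept accesses of @type (MSR_TYPE_R/W/RW) to @msr when @value is true;
 * pass them through to the guest when @value is false.
 */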
static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
					     int type, bool value)
{
	if (value)
		vmx_enable_intercept_for_msr(vcpu, msr, type);
	else
		vmx_disable_intercept_for_msr(vcpu, msr, type);
}

void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

/*
 * The MSR bitmap covers MSRs 0x00000000-0x00001fff (low) and
 * 0xc0000000-0xc0001fff (high).  The low range uses bytes 0x000-0x3ff for
 * reads and 0x800-0xbff for writes; the high range uses bytes 0x400-0x7ff
 * for reads and 0xc00-0xfff for writes.  MSRs not covered by either range
 * always VM-Exit.
 */
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)      \
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap,  \
						       u32 msr)		       \
{									       \
	int f = sizeof(unsigned long);					       \
									       \
	if (msr <= 0x1fff)						       \
		return bitop##_bit(msr, bitmap + base / f);		       \
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))		       \
		return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
	return (rtype)true;						       \
}
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop)		       \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0x0)     \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)

BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
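
/*
 * The above expand to vmx_{test,clear,set}_msr_bitmap_{read,write}(), e.g.
 * vmx_clear_msr_bitmap_read(bitmap, MSR_IA32_TSC) marks reads of the TSC MSR
 * as not intercepted (pass-through); the clear/set variants use non-atomic
 * bitops and so require external serialization of bitmap updates.
 */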

/* RVI (Requesting Virtual Interrupt) is bits 7:0 of the guest interrupt status. */
static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define BUILD_CONTROLS_SHADOW(lname, uname)				     \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)	     \
{									     \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		     \
		vmcs_write32(uname, val);				     \
		vmx->loaded_vmcs->controls_shadow.lname = val;		     \
	}								     \
}									     \
static inline u32 __##lname##_controls_get(struct loaded_vmcs *vmcs)	     \
{									     \
	return vmcs->controls_shadow.lname;				     \
}									     \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)		     \
{									     \
	return __##lname##_controls_get(vmx->loaded_vmcs);		     \
}									     \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)   \
{									     \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	     \
}									     \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{									     \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	     \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
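
/*
 * Each BUILD_CONTROLS_SHADOW() invocation above generates
 * <lname>_controls_{set,get,setbit,clearbit}() accessors that shadow the
 * control field in the loaded VMCS, e.g.
 * exec_controls_setbit(vmx, CPU_BASED_USE_TSC_OFFSETTING) performs a VMWRITE
 * only if the shadowed value actually changes.
 */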

/*
 * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the
 * cache on demand.  Other registers not listed here are synced to
 * the cache immediately after VM-Exit.
 */
#define VMX_REGS_LAZY_LOAD_SET	((1 << VCPU_REGS_RIP) |		\
				 (1 << VCPU_REGS_RSP) |		\
				 (1 << VCPU_EXREG_RFLAGS) |	\
				 (1 << VCPU_EXREG_PDPTR) |	\
				 (1 << VCPU_EXREG_SEGMENTS) |	\
				 (1 << VCPU_EXREG_CR0) |	\
				 (1 << VCPU_EXREG_CR3) |	\
				 (1 << VCPU_EXREG_CR4) |	\
				 (1 << VCPU_EXREG_EXIT_INFO_1) | \
				 (1 << VCPU_EXREG_EXIT_INFO_2))

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	}
	return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	}
	return vmx->exit_intr_info;
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return secondary_exec_controls_get(vmx) &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

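/*
 * #PF must be intercepted when shadow paging is in use (no EPT), or when KVM
 * may need to emulate reserved-bit page faults because the guest's
 * MAXPHYADDR is smaller than the host's.
 */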
static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	if (!enable_ept)
		return true;

	return allow_smaller_maxphyaddr &&
	       cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}

static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	    SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

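/* Reg2 is the second register operand, bits 31:28 of the VMX instruction info field. */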
static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{
	return (vmx_instr_info >> 28) & 0xf;
}

#endif /* __KVM_X86_VMX_H */