/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "mmu.h"
#include "cpuid.h"

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include "kvm_cache_regs.h"
#include "x86.h"

#include <asm/io.h>
#include <asm/desc.h>
#include <asm/vmx.h>
#include <asm/virtext.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/perf_event.h>
#include <asm/kexec.h>

#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)
#define __ex_clear(x, reg) \
	____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id vmx_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_VMX),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);

static bool __read_mostly enable_vpid = true;
module_param_named(vpid, enable_vpid, bool, S_IRUGO);

static bool __read_mostly flexpriority_enabled = true;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);

static bool __read_mostly enable_ept = true;
module_param_named(ept, enable_ept, bool, S_IRUGO);

static bool __read_mostly enable_unrestricted_guest = true;
module_param_named(unrestricted_guest,
			enable_unrestricted_guest, bool, S_IRUGO);

static bool __read_mostly enable_ept_ad_bits = true;
module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);

static bool __read_mostly emulate_invalid_guest_state = true;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);

static bool __read_mostly vmm_exclusive = true;
module_param(vmm_exclusive, bool, S_IRUGO);

static bool __read_mostly fasteoi = true;
module_param(fasteoi, bool, S_IRUGO);

static bool __read_mostly enable_apicv = true;
module_param(enable_apicv, bool, S_IRUGO);

static bool __read_mostly enable_shadow_vmcs = true;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

/*
 * If nested=1, nested virtualization is supported, i.e., guests may use
 * VMX and be a hypervisor for its own guests. If nested=0, guests may not
 * use VMX instructions.
 */
static bool __read_mostly nested = false;
module_param(nested, bool, S_IRUGO);
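
/*
 * Illustrative usage (not part of this file): these knobs are set at module
 * load time, e.g.
 *
 *   modprobe kvm-intel nested=1 ept=1 unrestricted_guest=1
 *
 * and, being S_IRUGO, are afterwards read-only via
 * /sys/module/kvm_intel/parameters/.
 */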

#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON \
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT)

#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

/*
 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. Also indicate if ple enabled.
 *             According to test, this time is usually smaller than 128 cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to execute
 *             in a PAUSE loop. Tests indicate that most spinlocks are held for
 *             less than 2^12 cycles.
 * Time is measured based on a counter that runs in the same rate as the TSC,
 * refer SDM volume 3b section 21.6.13 & 22.1.3.
 */
#define KVM_VMX_DEFAULT_PLE_GAP    128
#define KVM_VMX_DEFAULT_PLE_WINDOW 4096
static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
module_param(ple_gap, int, S_IRUGO);

static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, int, S_IRUGO);

extern const ulong vmx_return;

#define NR_AUTOLOAD_MSRS 8
#define VMCS02_POOL_SIZE 1

struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};
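
/*
 * Note: apart from revision_id and abort, the layout of the hardware VMCS
 * region is implementation-specific and opaque to software; it may only be
 * accessed through VMREAD/VMWRITE (Intel SDM Vol. 3, "Virtual Machine
 * Control Structures"). The flexible "data" member merely reserves the rest
 * of the allocated region.
 */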

/*
 * Track a VMCS that may be loaded on a certain CPU. If it is (cpu != -1),
 * also remember whether it was VMLAUNCHed, and maintain a linked list of all
 * VMCSs loaded on this CPU (so we can clear them if the CPU goes down).
 */
struct loaded_vmcs {
	struct vmcs *vmcs;
	int cpu;
	int launched;
	struct list_head loaded_vmcss_on_cpu_link;
};

struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};

/*
 * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for
 * a single nested guest (L2), hence the name vmcs12. Any VMX implementation
 * has a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure
 * is stored in guest memory specified by VMPTRLD, but is opaque to the guest,
 * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
 * More than one of these structures may exist, if L1 runs multiple L2 guests.
 * nested_vmx_run() handles a VMLAUNCH/VMRESUME on L2; the hardware VMCS used
 * to actually run an L2 guest is called vmcs02 (see the vmcs02 pool below).
 *
 * The struct is packed and its layout must never change, because it is part
 * of guest-visible state that has to survive live migration between hosts.
 * New fields may only be appended in the padding areas, and VMCS12_REVISION
 * must be changed whenever the content or layout changes.
 */
typedef u64 natural_width;
struct __packed vmcs12 {
	/* According to the Intel spec, a VMCS region must start with the
	 * following two fields. Then follow implementation-specific data.
	 */
	u32 revision_id;
	u32 abort;

	u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
	u32 padding[7]; /* room for future expansion */

	u64 io_bitmap_a;
	u64 io_bitmap_b;
	u64 msr_bitmap;
	u64 vm_exit_msr_store_addr;
	u64 vm_exit_msr_load_addr;
	u64 vm_entry_msr_load_addr;
	u64 tsc_offset;
	u64 virtual_apic_page_addr;
	u64 apic_access_addr;
	u64 ept_pointer;
	u64 guest_physical_address;
	u64 vmcs_link_pointer;
	u64 guest_ia32_debugctl;
	u64 guest_ia32_pat;
	u64 guest_ia32_efer;
	u64 guest_ia32_perf_global_ctrl;
	u64 guest_pdptr0;
	u64 guest_pdptr1;
	u64 guest_pdptr2;
	u64 guest_pdptr3;
	u64 host_ia32_pat;
	u64 host_ia32_efer;
	u64 host_ia32_perf_global_ctrl;
	u64 padding64[8]; /* room for future expansion */

	/*
	 * To allow migration of L1 (complete with its L2 guests) between
	 * machines of different natural widths (32 or 64 bit), we cannot have
	 * unsigned long fields with no explicit size. We use u64 (aliased
	 * natural_width) instead.
	 */
	natural_width cr0_guest_host_mask;
	natural_width cr4_guest_host_mask;
	natural_width cr0_read_shadow;
	natural_width cr4_read_shadow;
	natural_width cr3_target_value0;
	natural_width cr3_target_value1;
	natural_width cr3_target_value2;
	natural_width cr3_target_value3;
	natural_width exit_qualification;
	natural_width guest_linear_address;
	natural_width guest_cr0;
	natural_width guest_cr3;
	natural_width guest_cr4;
	natural_width guest_es_base;
	natural_width guest_cs_base;
	natural_width guest_ss_base;
	natural_width guest_ds_base;
	natural_width guest_fs_base;
	natural_width guest_gs_base;
	natural_width guest_ldtr_base;
	natural_width guest_tr_base;
	natural_width guest_gdtr_base;
	natural_width guest_idtr_base;
	natural_width guest_dr7;
	natural_width guest_rsp;
	natural_width guest_rip;
	natural_width guest_rflags;
	natural_width guest_pending_dbg_exceptions;
	natural_width guest_sysenter_esp;
	natural_width guest_sysenter_eip;
	natural_width host_cr0;
	natural_width host_cr3;
	natural_width host_cr4;
	natural_width host_fs_base;
	natural_width host_gs_base;
	natural_width host_tr_base;
	natural_width host_gdtr_base;
	natural_width host_idtr_base;
	natural_width host_ia32_sysenter_esp;
	natural_width host_ia32_sysenter_eip;
	natural_width host_rsp;
	natural_width host_rip;
	natural_width paddingl[8]; /* room for future expansion */
	u32 pin_based_vm_exec_control;
	u32 cpu_based_vm_exec_control;
	u32 exception_bitmap;
	u32 page_fault_error_code_mask;
	u32 page_fault_error_code_match;
	u32 cr3_target_count;
	u32 vm_exit_controls;
	u32 vm_exit_msr_store_count;
	u32 vm_exit_msr_load_count;
	u32 vm_entry_controls;
	u32 vm_entry_msr_load_count;
	u32 vm_entry_intr_info_field;
	u32 vm_entry_exception_error_code;
	u32 vm_entry_instruction_len;
	u32 tpr_threshold;
	u32 secondary_vm_exec_control;
	u32 vm_instruction_error;
	u32 vm_exit_reason;
	u32 vm_exit_intr_info;
	u32 vm_exit_intr_error_code;
	u32 idt_vectoring_info_field;
	u32 idt_vectoring_error_code;
	u32 vm_exit_instruction_len;
	u32 vmx_instruction_info;
	u32 guest_es_limit;
	u32 guest_cs_limit;
	u32 guest_ss_limit;
	u32 guest_ds_limit;
	u32 guest_fs_limit;
	u32 guest_gs_limit;
	u32 guest_ldtr_limit;
	u32 guest_tr_limit;
	u32 guest_gdtr_limit;
	u32 guest_idtr_limit;
	u32 guest_es_ar_bytes;
	u32 guest_cs_ar_bytes;
	u32 guest_ss_ar_bytes;
	u32 guest_ds_ar_bytes;
	u32 guest_fs_ar_bytes;
	u32 guest_gs_ar_bytes;
	u32 guest_ldtr_ar_bytes;
	u32 guest_tr_ar_bytes;
	u32 guest_interruptibility_info;
	u32 guest_activity_state;
	u32 guest_sysenter_cs;
	u32 host_ia32_sysenter_cs;
	u32 vmx_preemption_timer_value;
	u32 padding32[7]; /* room for future expansion */
	u16 virtual_processor_id;
	u16 guest_es_selector;
	u16 guest_cs_selector;
	u16 guest_ss_selector;
	u16 guest_ds_selector;
	u16 guest_fs_selector;
	u16 guest_gs_selector;
	u16 guest_ldtr_selector;
	u16 guest_tr_selector;
	u16 host_es_selector;
	u16 host_cs_selector;
	u16 host_ss_selector;
	u16 host_ds_selector;
	u16 host_fs_selector;
	u16 host_gs_selector;
	u16 host_tr_selector;
};

/*
 * VMCS12_REVISION is an arbitrary id that should be changed if the content or
 * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
 * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
 */
#define VMCS12_REVISION 0x11e57ed0

/*
 * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
 * and any VMCS region. Although only sizeof(struct vmcs12) is currently used,
 * 4K are reserved to avoid future complications.
 */
#define VMCS12_SIZE 0x1000

/* Used to remember the last vmcs02 used for some recently used vmcs12s */
struct vmcs02_list {
	struct list_head list;
	gpa_t vmptr;
	struct loaded_vmcs vmcs02;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level 1 guest done vmxon? */
	bool vmxon;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/* The host-usable pointer to the above */
	struct page *current_vmcs12_page;
	struct vmcs12 *current_vmcs12;
	struct vmcs *current_shadow_vmcs;
	/*
	 * Indicates if the shadow vmcs must be updated with the
	 * data held by vmcs12
	 */
	bool sync_shadow_vmcs;

	/* vmcs02_list cache of VMCSs recently used to run L2 guests */
	struct list_head vmcs02_pool;
	int vmcs02_num;
	u64 vmcs01_tsc_offset;
	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;
	/*
	 * Guest pages referred to in vmcs02 with host-physical pointers, so
	 * we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	u64 msr_ia32_feature_control;
};

#define POSTED_INTR_ON 0

/* Posted-Interrupt Descriptor */
struct pi_desc {
	u32 pir[8];  /* Posted interrupt requested */
	u32 control; /* bit 0 of control is the outstanding-notification bit */
	u32 rsvd[7];
} __aligned(64);
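
/*
 * The layout above mirrors the hardware posted-interrupt descriptor (Intel
 * SDM Vol. 3, "Posted-Interrupt Processing"): a 256-bit Posted-Interrupt
 * Request bitmap (one bit per vector) followed by a control word whose bit 0
 * ("ON", outstanding notification) tells the CPU whether a notification event
 * is pending. The helpers below use atomic bitops because the CPU may update
 * the descriptor concurrently with software.
 */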

static bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
	return test_and_set_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
	return test_and_clear_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	unsigned long host_rsp;
	u8 fail;
	u8 cpl;
	bool nmi_known_unmasked;
	u32 exit_intr_info;
	u32 idt_vectoring_info;
	ulong rflags;
	struct shared_msr_entry *guest_msrs;
	int nmsrs;
	int save_nmsrs;
	unsigned long host_idt_base;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif
	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;
	bool __launched; /* temporary, used in vmx_vcpu_run */
	struct msr_autoload {
		unsigned nr;
		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
	} msr_autoload;
	struct {
		int loaded;
		u16 fs_sel, gs_sel, ldt_sel;
#ifdef CONFIG_X86_64
		u16 ds_sel, es_sel;
#endif
		int gs_ldt_reload_needed;
		int fs_reload_needed;
	} host_state;
	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	/* Support for vnmi-less CPUs */
	int soft_vnmi_blocked;
	ktime_t entry_time;
	s64 vnmi_blocked_time;
	u32 exit_reason;

	bool rdtscp_enabled;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
#define FIELD(number, name)	[number] = VMCS12_OFFSET(name)
#define FIELD64(number, name)	[number] = VMCS12_OFFSET(name), \
				[number##_HIGH] = VMCS12_OFFSET(name)+4
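
/*
 * A 64-bit VMCS field can also be accessed as two 32-bit halves: the field
 * encoding of the high half is the low-half encoding plus 1, exposed by the
 * headers as the *_HIGH constants. FIELD64() therefore emits two table
 * entries, e.g. FIELD64(TSC_OFFSET, tsc_offset) maps both TSC_OFFSET and
 * TSC_OFFSET_HIGH (offset + 4), so vmcs_field_to_offset() below resolves
 * either access to the right bytes of struct vmcs12.
 */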

static const unsigned long shadow_read_only_fields[] = {
	/*
	 * We do NOT shadow fields that are modified when L0
	 * traps and emulates any vmx instruction (e.g. VMPTRLD,
	 * VMXON...) executed by L1.
	 * For example, VM_INSTRUCTION_ERROR is read
	 * by L1 if a vmx instruction fails (part of the error path).
	 * Note the code assumes this logic. If for some reason
	 * we start shadowing these fields then we need to
	 * force a shadow sync when L0 emulates vmx instructions
	 * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
	 * by nested_vmx_failValid)
	 */
	VM_EXIT_REASON,
	VM_EXIT_INTR_INFO,
	VM_EXIT_INSTRUCTION_LEN,
	IDT_VECTORING_INFO_FIELD,
	IDT_VECTORING_ERROR_CODE,
	VM_EXIT_INTR_ERROR_CODE,
	EXIT_QUALIFICATION,
	GUEST_LINEAR_ADDRESS,
	GUEST_PHYSICAL_ADDRESS
};
static const int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static const unsigned long shadow_read_write_fields[] = {
	GUEST_RIP,
	GUEST_RSP,
	GUEST_CR0,
	GUEST_CR3,
	GUEST_CR4,
	GUEST_INTERRUPTIBILITY_INFO,
	GUEST_RFLAGS,
	GUEST_CS_SELECTOR,
	GUEST_CS_AR_BYTES,
	GUEST_CS_LIMIT,
	GUEST_CS_BASE,
	GUEST_ES_BASE,
	CR0_GUEST_HOST_MASK,
	CR0_READ_SHADOW,
	CR4_READ_SHADOW,
	TSC_OFFSET,
	EXCEPTION_BITMAP,
	CPU_BASED_VM_EXEC_CONTROL,
	VM_ENTRY_EXCEPTION_ERROR_CODE,
	VM_ENTRY_INTR_INFO_FIELD,
	VM_ENTRY_INSTRUCTION_LEN,
	HOST_FS_BASE,
	HOST_GS_BASE,
	HOST_FS_SELECTOR,
	HOST_GS_SELECTOR
};
static const int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);

static const unsigned short vmcs_field_to_offset_table[] = {
	FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
	FIELD(GUEST_ES_SELECTOR, guest_es_selector),
	FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
	FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
	FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
	FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
	FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
	FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
	FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
	FIELD(HOST_ES_SELECTOR, host_es_selector),
	FIELD(HOST_CS_SELECTOR, host_cs_selector),
	FIELD(HOST_SS_SELECTOR, host_ss_selector),
	FIELD(HOST_DS_SELECTOR, host_ds_selector),
	FIELD(HOST_FS_SELECTOR, host_fs_selector),
	FIELD(HOST_GS_SELECTOR, host_gs_selector),
	FIELD(HOST_TR_SELECTOR, host_tr_selector),
	FIELD64(IO_BITMAP_A, io_bitmap_a),
	FIELD64(IO_BITMAP_B, io_bitmap_b),
	FIELD64(MSR_BITMAP, msr_bitmap),
	FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
	FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
	FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
	FIELD64(TSC_OFFSET, tsc_offset),
	FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
	FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
	FIELD64(EPT_POINTER, ept_pointer),
	FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
	FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
	FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
	FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
	FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
	FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
	FIELD64(GUEST_PDPTR0, guest_pdptr0),
	FIELD64(GUEST_PDPTR1, guest_pdptr1),
	FIELD64(GUEST_PDPTR2, guest_pdptr2),
	FIELD64(GUEST_PDPTR3, guest_pdptr3),
	FIELD64(HOST_IA32_PAT, host_ia32_pat),
	FIELD64(HOST_IA32_EFER, host_ia32_efer),
	FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
	FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
	FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
	FIELD(EXCEPTION_BITMAP, exception_bitmap),
	FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
	FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
	FIELD(CR3_TARGET_COUNT, cr3_target_count),
	FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
	FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
	FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
	FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
	FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
	FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
	FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
	FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
	FIELD(TPR_THRESHOLD, tpr_threshold),
	FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
	FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
	FIELD(VM_EXIT_REASON, vm_exit_reason),
	FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
	FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
	FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
	FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
	FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
	FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
	FIELD(GUEST_ES_LIMIT, guest_es_limit),
	FIELD(GUEST_CS_LIMIT, guest_cs_limit),
	FIELD(GUEST_SS_LIMIT, guest_ss_limit),
	FIELD(GUEST_DS_LIMIT, guest_ds_limit),
	FIELD(GUEST_FS_LIMIT, guest_fs_limit),
	FIELD(GUEST_GS_LIMIT, guest_gs_limit),
	FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
	FIELD(GUEST_TR_LIMIT, guest_tr_limit),
	FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
	FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
	FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
	FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
	FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
	FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
	FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
	FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
	FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
	FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
	FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
	FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
	FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
	FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
	FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
	FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
	FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
	FIELD(CR0_READ_SHADOW, cr0_read_shadow),
	FIELD(CR4_READ_SHADOW, cr4_read_shadow),
	FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
	FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
	FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
	FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
	FIELD(EXIT_QUALIFICATION, exit_qualification),
	FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
	FIELD(GUEST_CR0, guest_cr0),
	FIELD(GUEST_CR3, guest_cr3),
	FIELD(GUEST_CR4, guest_cr4),
	FIELD(GUEST_ES_BASE, guest_es_base),
	FIELD(GUEST_CS_BASE, guest_cs_base),
	FIELD(GUEST_SS_BASE, guest_ss_base),
	FIELD(GUEST_DS_BASE, guest_ds_base),
	FIELD(GUEST_FS_BASE, guest_fs_base),
	FIELD(GUEST_GS_BASE, guest_gs_base),
	FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
	FIELD(GUEST_TR_BASE, guest_tr_base),
	FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
	FIELD(GUEST_IDTR_BASE, guest_idtr_base),
	FIELD(GUEST_DR7, guest_dr7),
	FIELD(GUEST_RSP, guest_rsp),
	FIELD(GUEST_RIP, guest_rip),
	FIELD(GUEST_RFLAGS, guest_rflags),
	FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
	FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
	FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
	FIELD(HOST_CR0, host_cr0),
	FIELD(HOST_CR3, host_cr3),
	FIELD(HOST_CR4, host_cr4),
	FIELD(HOST_FS_BASE, host_fs_base),
	FIELD(HOST_GS_BASE, host_gs_base),
	FIELD(HOST_TR_BASE, host_tr_base),
	FIELD(HOST_GDTR_BASE, host_gdtr_base),
	FIELD(HOST_IDTR_BASE, host_idtr_base),
	FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
	FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
	FIELD(HOST_RSP, host_rsp),
	FIELD(HOST_RIP, host_rip),
};
static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table);

static inline short vmcs_field_to_offset(unsigned long field)
{
	if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0)
		return -1;
	return vmcs_field_to_offset_table[field];
}

static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.current_vmcs12;
}

static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
{
	struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
	if (is_error_page(page))
		return NULL;

	return page;
}
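
/*
 * gfn_to_page() takes a reference on the page it returns, so every
 * successful nested_get_page() must be paired with one of the release
 * helpers below; the "dirty" variant also marks the page dirty for the
 * dirty log.
 */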

static void nested_release_page(struct page *page)
{
	kvm_release_page_dirty(page);
}

static void nested_release_page_clean(struct page *page)
{
	kvm_release_page_clean(page);
}

static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
static bool guest_state_valid(struct kvm_vcpu *vcpu);
static u32 vmx_segment_access_rights(struct kvm_segment *var);
static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is
 * needed when a CPU is brought down, and we need to VMCLEAR all VMCSs
 * loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
static DEFINE_PER_CPU(struct desc_ptr, host_gdt);

static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;
static unsigned long *vmx_msr_bitmap_legacy_x2apic;
static unsigned long *vmx_msr_bitmap_longmode_x2apic;
static unsigned long *vmx_vmread_bitmap;
static unsigned long *vmx_vmwrite_bitmap;

static bool cpu_has_load_ia32_efer;
static bool cpu_has_load_perf_global_ctrl;

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 cpu_based_2nd_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

static struct vmx_capability {
	u32 ept;
	u32 vpid;
} vmx_capability;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static const struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

static u64 host_efer;

static void ept_save_pdptrs(struct kvm_vcpu *vcpu);

/*
 * Keep MSR_STAR at the end, as setup_msrs() will only load it (on 64-bit
 * hosts) when the guest actually enables SYSCALL (EFER.SCE).
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

static inline bool is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_invalid_opcode(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static inline bool is_machine_check(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool cpu_has_vmx_msr_bitmap(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
}

static inline bool cpu_has_vmx_tpr_shadow(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
}

static inline bool vm_need_tpr_shadow(struct kvm *kvm)
{
	return cpu_has_vmx_tpr_shadow() && irqchip_in_kernel(kvm);
}

static inline bool cpu_has_secondary_exec_ctrls(void)
{
	return vmcs_config.cpu_based_exec_ctrl &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
}

static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
}

static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
}

static inline bool cpu_has_vmx_apic_register_virt(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_APIC_REGISTER_VIRT;
}

static inline bool cpu_has_vmx_virtual_intr_delivery(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
}

static inline bool cpu_has_vmx_posted_intr(void)
{
	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
}

static inline bool cpu_has_vmx_apicv(void)
{
	return cpu_has_vmx_apic_register_virt() &&
		cpu_has_vmx_virtual_intr_delivery() &&
		cpu_has_vmx_posted_intr();
}

static inline bool cpu_has_vmx_flexpriority(void)
{
	return cpu_has_vmx_tpr_shadow() &&
		cpu_has_vmx_virtualize_apic_accesses();
}

static inline bool cpu_has_vmx_ept_execute_only(void)
{
	return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
}

static inline bool cpu_has_vmx_eptp_uncacheable(void)
{
	return vmx_capability.ept & VMX_EPTP_UC_BIT;
}

static inline bool cpu_has_vmx_eptp_writeback(void)
{
	return vmx_capability.ept & VMX_EPTP_WB_BIT;
}

static inline bool cpu_has_vmx_ept_2m_page(void)
{
	return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_1g_page(void)
{
	return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_4levels(void)
{
	return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
}

static inline bool cpu_has_vmx_ept_ad_bits(void)
{
	return vmx_capability.ept & VMX_EPT_AD_BIT;
}

static inline bool cpu_has_vmx_invept_context(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invept_global(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
}

static inline bool cpu_has_vmx_invvpid_single(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invvpid_global(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_ept(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_EPT;
}

static inline bool cpu_has_vmx_unrestricted_guest(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_UNRESTRICTED_GUEST;
}

static inline bool cpu_has_vmx_ple(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_PAUSE_LOOP_EXITING;
}

static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
	return flexpriority_enabled && irqchip_in_kernel(kvm);
}

static inline bool cpu_has_vmx_vpid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_VPID;
}

static inline bool cpu_has_vmx_rdtscp(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_RDTSCP;
}

static inline bool cpu_has_vmx_invpcid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_INVPCID;
}

static inline bool cpu_has_virtual_nmis(void)
{
	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}

static inline bool cpu_has_vmx_wbinvd_exit(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_WBINVD_EXITING;
}

static inline bool cpu_has_vmx_shadow_vmcs(void)
{
	u64 vmx_msr;
	rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
	/* check if the cpu supports writing r/o exit information fields */
	if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
		return false;

	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_SHADOW_VMCS;
}

static inline bool report_flexpriority(void)
{
	return flexpriority_enabled;
}

static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}

static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}

static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}

static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
}

static inline bool is_exception(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
}

static void nested_vmx_vmexit(struct kvm_vcpu *vcpu);
static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
			struct vmcs12 *vmcs12,
			u32 reason, unsigned long qualification);

static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
			return i;
	return -1;
}

static inline void __invvpid(int ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	asm volatile (__ex(ASM_VMX_INVVPID)
		  /* CF==1 or ZF==1 --> rc = -1 */
		  "; ja 1f ; ud2 ; 1:"
		  : : "a"(&operand), "c"(ext) : "cc", "memory");
}

static inline void __invept(int ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	asm volatile (__ex(ASM_VMX_INVEPT)
		  /* CF==1 or ZF==1 --> rc = -1 */
		  "; ja 1f ; ud2 ; 1:\n"
		  : : "a" (&operand), "c" (ext) : "cc", "memory");
}
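
/*
 * Both INVVPID and INVEPT take a 128-bit memory operand (the "a" constraint
 * above points at it) plus an invalidation type in a register ("c"). Per the
 * SDM, success is indicated by CF==0 and ZF==0, which is why each helper
 * falls into ud2 when "ja" (taken iff CF==0 && ZF==0) does not branch: a
 * failed TLB invalidation here would be a host bug worth crashing loudly on.
 */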

static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}

static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
		      : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}
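
/*
 * VMCLEAR flushes any cached state for this VMCS back to memory and marks
 * the region "clear", so the next VM entry through it must be a VMLAUNCH
 * rather than a VMRESUME; loaded_vmcs_init() below resets the software
 * bookkeeping (cpu, launched) to match that hardware state.
 */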

static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
{
	vmcs_clear(loaded_vmcs->vmcs);
	loaded_vmcs->cpu = -1;
	loaded_vmcs->launched = 0;
}

static void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
			: "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
			: "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
		       vmcs, phys_addr);
}

#ifdef CONFIG_KEXEC
/*
 * This bitmap is used to indicate whether the vmclear
 * operation is enabled on all cpus. All disabled by
 * default.
 */
static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;

static inline void crash_enable_local_vmclear(int cpu)
{
	cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline void crash_disable_local_vmclear(int cpu)
{
	cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline int crash_local_vmclear_enabled(int cpu)
{
	return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static void crash_vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v;

	if (!crash_local_vmclear_enabled(cpu))
		return;

	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);
}
#else
static inline void crash_enable_local_vmclear(int cpu) { }
static inline void crash_disable_local_vmclear(int cpu) { }
#endif /* CONFIG_KEXEC */

static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	if (loaded_vmcs->cpu != cpu)
		return; /* vcpu migration can race with cpu offline */
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	crash_disable_local_vmclear(cpu);
	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);

	/*
	 * The deletion from loaded_vmcss_on_cpu above must be ordered before
	 * setting loaded_vmcs->cpu to -1, which is done in loaded_vmcs_init.
	 * Otherwise another cpu can observe cpu == -1 first and add this
	 * loaded_vmcs to its percpu list before it is deleted from ours.
	 * Pairs with the smp_rmb() in vmx_vcpu_load().
	 */
	smp_wmb();

	loaded_vmcs_init(loaded_vmcs);
	crash_enable_local_vmclear(cpu);
}

static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
	int cpu = loaded_vmcs->cpu;

	if (cpu != -1)
		smp_call_function_single(cpu,
			 __loaded_vmcs_clear, loaded_vmcs, 1);
}

static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
{
	if (vmx->vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_single())
		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	if (cpu_has_vmx_invvpid_global())
		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(struct vcpu_vmx *vmx)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vmx);
	else
		vpid_sync_vcpu_global();
}

static inline void ept_sync_global(void)
{
	if (cpu_has_vmx_invept_global())
		__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (enable_ept) {
		if (cpu_has_vmx_invept_context())
			__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
		else
			ept_sync_global();
	}
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
	return vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	return vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}
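
/*
 * On 32-bit hosts a 64-bit VMCS field is read as two VMREADs: the base
 * encoding returns the low half and encoding+1 (the *_HIGH variant, see
 * FIELD64() above) returns the high half. The two reads are not atomic,
 * which is acceptable because the VMCS is only accessed while it is current
 * on this CPU and the callers control preemption.
 */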

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
		       : "=q"(error) : "a"(value), "d"(field) : "cc");
	if (unlikely(error))
		vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	asm volatile (""); /* compiler barrier between the two 32-bit halves */
	vmcs_writel(field+1, value >> 32);
#endif
}

static void vmcs_clear_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) | mask);
}

static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}

static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
				       unsigned field)
{
	bool ret;
	u32 mask = 1 << (seg * SEG_FIELD_NR + field);

	if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
		vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
		vmx->segment_cache.bitmask = 0;
	}
	ret = vmx->segment_cache.bitmask & mask;
	vmx->segment_cache.bitmask |= mask;
	return ret;
}

static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
{
	u16 *p = &vmx->segment_cache.seg[seg].selector;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
	return *p;
}

static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
{
	ulong *p = &vmx->segment_cache.seg[seg].base;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
	return *p;
}

static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].limit;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
	return *p;
}

static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].ar;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
	return *p;
}

static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
	     (1u << NM_VECTOR) | (1u << DB_VECTOR);
	if ((vcpu->guest_debug &
	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
		eb |= 1u << BP_VECTOR;
	if (to_vmx(vcpu)->rmode.vm86_active)
		eb = ~0;
	if (enable_ept)
		eb &= ~(1u << PF_VECTOR);
	if (vcpu->fpu_active)
		eb &= ~(1u << NM_VECTOR);

	/* When we are running a nested L2 guest and L1 specified for it a
	 * certain exception bitmap, we must trap the same exceptions and pass
	 * the same to L1. When running L2, we will only handle the exceptions
	 * specified above if L1 did not want them.
	 */
	if (is_guest_mode(vcpu))
		eb |= get_vmcs12(vcpu)->exception_bitmap;

	vmcs_write32(EXCEPTION_BITMAP, eb);
}

static void clear_atomic_switch_msr_special(unsigned long entry,
		unsigned long exit)
{
	vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
	vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
}

static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
{
	unsigned i;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer) {
			clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl) {
			clear_atomic_switch_msr_special(
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
			return;
		}
		break;
	}

	for (i = 0; i < m->nr; ++i)
		if (m->guest[i].index == msr)
			break;

	if (i == m->nr)
		return;
	--m->nr;
	m->guest[i] = m->guest[m->nr];
	m->host[i] = m->host[m->nr];
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
}

static void add_atomic_switch_msr_special(unsigned long entry,
		unsigned long exit, unsigned long guest_val_vmcs,
		unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
{
	vmcs_write64(guest_val_vmcs, guest_val);
	vmcs_write64(host_val_vmcs, host_val);
	vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
	vmcs_set_bits(VM_EXIT_CONTROLS, exit);
}

static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
				  u64 guest_val, u64 host_val)
{
	unsigned i;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer) {
			add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER,
					GUEST_IA32_EFER,
					HOST_IA32_EFER,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl) {
			add_atomic_switch_msr_special(
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
					GUEST_IA32_PERF_GLOBAL_CTRL,
					HOST_IA32_PERF_GLOBAL_CTRL,
					guest_val, host_val);
			return;
		}
		break;
	}

	for (i = 0; i < m->nr; ++i)
		if (m->guest[i].index == msr)
			break;

	if (i == NR_AUTOLOAD_MSRS) {
		printk_once(KERN_WARNING "Not enough msr switch entries. "
				"Can't add msr %x\n", msr);
		return;
	} else if (i == m->nr) {
		++m->nr;
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
	}

	m->guest[i].index = msr;
	m->guest[i].value = guest_val;
	m->host[i].index = msr;
	m->host[i].value = host_val;
}
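
/*
 * Two mechanisms switch an MSR atomically across VM entry/exit: dedicated
 * "load IA32_EFER / IA32_PERF_GLOBAL_CTRL" VM-entry/-exit controls when the
 * CPU has them, and otherwise the generic MSR-load areas, i.e. the
 * guest[]/host[] arrays whose addresses are programmed into
 * VM_ENTRY_MSR_LOAD_ADDR / VM_EXIT_MSR_LOAD_ADDR elsewhere in this file.
 * The hardware walks VM_ENTRY_MSR_LOAD_COUNT entries on entry and
 * VM_EXIT_MSR_LOAD_COUNT entries on exit, hence the paired count updates
 * above.
 */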

static void reload_tss(void)
{
	/*
	 * VT restores TR but not its size.  Useless.
	 */
	struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
	struct desc_struct *descs;

	descs = (void *)gdt->address;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
	load_TR_desc();
}

static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
{
	u64 guest_efer;
	u64 ignore_bits;

	guest_efer = vmx->vcpu.arch.efer;

	/*
	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
	 * outside long mode
	 */
	ignore_bits = EFER_NX | EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif
	guest_efer &= ~ignore_bits;
	guest_efer |= host_efer & ignore_bits;
	vmx->guest_msrs[efer_offset].data = guest_efer;
	vmx->guest_msrs[efer_offset].mask = ~ignore_bits;

	clear_atomic_switch_msr(vmx, MSR_EFER);
	/* On ept, can't emulate nx, and must switch nx atomically */
	if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
		guest_efer = vmx->vcpu.arch.efer;
		if (!(guest_efer & EFER_LMA))
			guest_efer &= ~EFER_LME;
		add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
		return false;
	}

	return true;
}

static unsigned long segment_base(u16 selector)
{
	struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (!(selector & ~3))
		return 0;

	table_base = gdt->address;

	if (selector & 4) { /* from ldt */
		u16 ldt_selector = kvm_read_ldt();

		if (!(ldt_selector & ~3))
			return 0;

		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = get_desc_base(d);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}

static inline unsigned long kvm_read_tr_base(void)
{
	u16 tr;
	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}

static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int i;

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = kvm_read_ldt();
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	savesegment(fs, vmx->host_state.fs_sel);
	if (!(vmx->host_state.fs_sel & 7)) {
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
		vmx->host_state.fs_reload_needed = 0;
	} else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_reload_needed = 1;
	}
	savesegment(gs, vmx->host_state.gs_sel);
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	savesegment(ds, vmx->host_state.ds_sel);
	savesegment(es, vmx->host_state.es_sel);
#endif

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

#ifdef CONFIG_X86_64
	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
	if (is_long_mode(&vmx->vcpu))
		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	for (i = 0; i < vmx->save_nmsrs; ++i)
		kvm_set_shared_msr(vmx->guest_msrs[i].index,
				   vmx->guest_msrs[i].data,
				   vmx->guest_msrs[i].mask);
}

static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
	if (!vmx->host_state.loaded)
		return;

	++vmx->vcpu.stat.host_state_reload;
	vmx->host_state.loaded = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu))
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	if (vmx->host_state.gs_ldt_reload_needed) {
		kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
		load_gs_index(vmx->host_state.gs_sel);
#else
		loadsegment(gs, vmx->host_state.gs_sel);
#endif
	}
	if (vmx->host_state.fs_reload_needed)
		loadsegment(fs, vmx->host_state.fs_sel);
#ifdef CONFIG_X86_64
	if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
		loadsegment(ds, vmx->host_state.ds_sel);
		loadsegment(es, vmx->host_state.es_sel);
	}
#endif
	reload_tss();
#ifdef CONFIG_X86_64
	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
	/*
	 * If the FPU is not active (through the host task or
	 * the guest vcpu), then restore the cr0.TS bit.
	 */
	if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
		stts();
	load_gdt(&__get_cpu_var(host_gdt));
}

static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
	preempt_disable();
	__vmx_load_host_state(vmx);
	preempt_enable();
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));

	if (!vmm_exclusive)
		kvm_cpu_vmxon(phys_addr);
	else if (vmx->loaded_vmcs->cpu != cpu)
		loaded_vmcs_clear(vmx->loaded_vmcs);

	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
		vmcs_load(vmx->loaded_vmcs->vmcs);
	}

	if (vmx->loaded_vmcs->cpu != cpu) {
		struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
		unsigned long sysenter_esp;

		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		local_irq_disable();
		crash_disable_local_vmclear(cpu);

		/*
		 * Read loaded_vmcs->cpu should be before fetching
		 * loaded_vmcs->loaded_vmcss_on_cpu_link.
		 * See the comments in __loaded_vmcs_clear().
		 */
		smp_rmb();

		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
			 &per_cpu(loaded_vmcss_on_cpu, cpu));
		crash_enable_local_vmclear(cpu);
		local_irq_enable();

		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.
		 */
		vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
		vmcs_writel(HOST_GDTR_BASE, gdt->address);     /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
		vmx->loaded_vmcs->cpu = cpu;
	}
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	__vmx_load_host_state(to_vmx(vcpu));
	if (!vmm_exclusive) {
		__loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
		vcpu->cpu = -1;
		kvm_cpu_vmxoff();
	}
}

static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
	ulong cr0;

	if (vcpu->fpu_active)
		return;
	vcpu->fpu_active = 1;
	cr0 = vmcs_readl(GUEST_CR0);
	cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
	cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
	vmcs_writel(GUEST_CR0, cr0);
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
	if (is_guest_mode(vcpu))
		vcpu->arch.cr0_guest_owned_bits &=
			~get_vmcs12(vcpu)->cr0_guest_host_mask;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
}

static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);

/*
 * Return the cr0 value that a nested guest would read. This is a combination
 * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed
 * by the hypervisor (cr0_read_shadow); cr0_guest_host_mask selects, bit by
 * bit, which of the two sources the guest sees.
 */
static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
{
	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}
static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
{
	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
}

static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	/* Note that there is no vcpu->fpu_active = 0 here. The caller must
	 * set this *before* calling this function.
	 */
	vmx_decache_cr0_guest_bits(vcpu);
	vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits = 0;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
	if (is_guest_mode(vcpu)) {
		/*
		 * L1's specified read shadow might not contain the TS bit,
		 * so now that we turned on shadowing of this bit, we need to
		 * set this bit of the shadow. Like in nested_vmx_run we need
		 * nested_read_cr0(vmcs12), but vmcs12->guest_cr0 is not yet
		 * up-to-date here because we just decached cr0.TS (and we'll
		 * only update vmcs12->guest_cr0 on nested exit).
		 */
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
		vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) |
			(vcpu->arch.cr0 & X86_CR0_TS);
		vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
	} else
		vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
}

static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags, save_rflags;

	if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
		__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
		rflags = vmcs_readl(GUEST_RFLAGS);
		if (to_vmx(vcpu)->rmode.vm86_active) {
			rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
			save_rflags = to_vmx(vcpu)->rmode.save_rflags;
			rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
		}
		to_vmx(vcpu)->rflags = rflags;
	}
	return to_vmx(vcpu)->rflags;
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
	to_vmx(vcpu)->rflags = rflags;
	if (to_vmx(vcpu)->rmode.vm86_active) {
		to_vmx(vcpu)->rmode.save_rflags = rflags;
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	}
	vmcs_writel(GUEST_RFLAGS, rflags);
}

static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	int ret = 0;

	if (interruptibility & GUEST_INTR_STATE_STI)
		ret |= KVM_X86_SHADOW_INT_STI;
	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
		ret |= KVM_X86_SHADOW_INT_MOV_SS;

	return ret & mask;
}

static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	u32 interruptibility = interruptibility_old;

	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);

	if (mask & KVM_X86_SHADOW_INT_MOV_SS)
		interruptibility |= GUEST_INTR_STATE_MOV_SS;
	else if (mask & KVM_X86_SHADOW_INT_STI)
		interruptibility |= GUEST_INTR_STATE_STI;

	if (interruptibility != interruptibility_old)
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
}
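
/*
 * The STI and MOV-SS interruptibility states block event delivery for
 * exactly one instruction (Intel SDM Vol. 3, "Interruptibility State").
 * The x86 emulator tracks the same notion in KVM_X86_SHADOW_INT_*, and the
 * pair of functions above translate between the two representations in
 * both directions.
 */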

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rip;

	rip = kvm_rip_read(vcpu);
	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	kvm_rip_write(vcpu, rip);

	/* skipping an emulated instruction also counts */
	vmx_set_interrupt_shadow(vcpu, 0);
}

/*
 * KVM wants to inject page-faults which it got to the guest. This function
 * checks whether in a nested guest, we need to inject them to L1 or L2.
 * This function assumes it is called with the exit reason in vmcs02 being
 * a #PF exception (this is the only case in which KVM injects a #PF when L2
 * is running).
 */
static int nested_pf_handled(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	/* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
	if (!(vmcs12->exception_bitmap & (1u << PF_VECTOR)))
		return 0;

	nested_vmx_vmexit(vcpu);
	return 1;
}

static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	if (nr == PF_VECTOR && is_guest_mode(vcpu) &&
	    !vmx->nested.nested_run_pending && nested_pf_handled(vcpu))
		return;

	if (has_error_code) {
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (vmx->rmode.vm86_active) {
		int inc_eip = 0;
		if (kvm_exception_is_soft(nr))
			inc_eip = vcpu->arch.event_exit_inst_len;
		if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}

	if (kvm_exception_is_soft(nr)) {
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmx->vcpu.arch.event_exit_inst_len);
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	} else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
}

static bool vmx_rdtscp_supported(void)
{
	return cpu_has_vmx_rdtscp();
}

static bool vmx_invpcid_supported(void)
{
	return cpu_has_vmx_invpcid() && enable_ept;
}

/*
 * Swap the guest_msrs entries at positions "from" and "to".
 */
static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
{
	struct shared_msr_entry tmp;

	tmp = vmx->guest_msrs[to];
	vmx->guest_msrs[to] = vmx->guest_msrs[from];
	vmx->guest_msrs[from] = tmp;
}

static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
{
	unsigned long *msr_bitmap;

	if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
		if (is_long_mode(vcpu))
			msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
		else
			msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
	} else {
		if (is_long_mode(vcpu))
			msr_bitmap = vmx_msr_bitmap_longmode;
		else
			msr_bitmap = vmx_msr_bitmap_legacy;
	}

	vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
}

/*
 * Set up the vmcs to automatically save and restore system
 * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct vcpu_vmx *vmx)
{
	int save_nmsrs, index;

	save_nmsrs = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu)) {
		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_LSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_CSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_TSC_AUX);
		if (index >= 0 && vmx->rdtscp_enabled)
			move_msr_up(vmx, index, save_nmsrs++);
		/*
		 * MSR_STAR is only needed on long mode guests, and only
		 * if efer.sce is enabled.
		 */
		index = __find_msr_index(vmx, MSR_STAR);
		if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
			move_msr_up(vmx, index, save_nmsrs++);
	}
#endif
	index = __find_msr_index(vmx, MSR_EFER);
	if (index >= 0 && update_transition_efer(vmx, index))
		move_msr_up(vmx, index, save_nmsrs++);

	vmx->save_nmsrs = save_nmsrs;

	if (cpu_has_vmx_msr_bitmap())
		vmx_set_msr_bitmap(&vmx->vcpu);
}

/*
 * reads and returns guest's timestamp counter "register"
 * guest_read_tsc = host_read_tsc + tsc_offset
 */
static u64 guest_read_tsc(void)
{
	u64 host_tsc, tsc_offset;

	rdtscll(host_tsc);
	tsc_offset = vmcs_read64(TSC_OFFSET);
	return host_tsc + tsc_offset;
}
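
/*
 * TSC virtualization in VMX is additive: with RDTSC exiting disabled, the
 * guest observes host_tsc + TSC_OFFSET directly, so everything below reduces
 * to keeping TSC_OFFSET (and, for nested guests, vmcs01_tsc_offset plus
 * vmcs12->tsc_offset) consistent with what L1 and L2 should each observe.
 */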

/*
 * Like guest_read_tsc, but always returns L1's notion of the timestamp
 * counter, even if a nested guest (L2) is currently running.
 */
u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
	u64 tsc_offset;

	tsc_offset = is_guest_mode(vcpu) ?
		to_vmx(vcpu)->nested.vmcs01_tsc_offset :
		vmcs_read64(TSC_OFFSET);
	return host_tsc + tsc_offset;
}

/*
 * Engage any workarounds for mis-matched TSC rates.  Currently limited to
 * software catchup for faster rates on slower CPUs.
 */
static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
	if (!scale)
		return;

	if (user_tsc_khz > tsc_khz) {
		vcpu->arch.tsc_catchup = 1;
		vcpu->arch.tsc_always_catchup = 1;
	} else
		WARN(1, "user requested TSC rate below hardware speed\n");
}

static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
{
	return vmcs_read64(TSC_OFFSET);
}

/*
 * writes 'offset' into guest's timestamp counter offset register
 */
static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	if (is_guest_mode(vcpu)) {
		/*
		 * We're here if L1 chose not to trap WRMSR to TSC. According
		 * to the spec, this should set L1's TSC; The offset that L1
		 * set for L2 remains unchanged, and still needs to be added
		 * to the newly set TSC to get L2's TSC.
		 */
		struct vmcs12 *vmcs12;
		to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
		/* recalculate vmcs02.TSC_OFFSET: */
		vmcs12 = get_vmcs12(vcpu);
		vmcs_write64(TSC_OFFSET, offset +
			(nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
			 vmcs12->tsc_offset : 0));
	} else {
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
					   vmcs_read64(TSC_OFFSET), offset);
		vmcs_write64(TSC_OFFSET, offset);
	}
}

static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
{
	u64 offset = vmcs_read64(TSC_OFFSET);

	vmcs_write64(TSC_OFFSET, offset + adjustment);
	if (is_guest_mode(vcpu)) {
		/* Even when running L2, the adjustment needs to apply to L1 */
		to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
	} else
		trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset,
					   offset + adjustment);
}
2122
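/*
 * Compute the TSC offset that would make the guest observe 'target_tsc'
 * right now, i.e. target_tsc minus the current host TSC reading.
 */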
2123static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
2124{
2125 return target_tsc - native_read_tsc();
2126}
2127
2128static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
2129{
2130 struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
2131 return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
2132}
2133
/*
 * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for all
 * guests if the "nested" module option is off, and can also be disabled for
 * a single guest by clearing its VMX cpuid bit.
 */
2140static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
2141{
2142 return nested && guest_cpuid_has_vmx(vcpu);
2143}
2144
/*
 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
 * returned for the various VMX controls MSRs when nested VMX is enabled.
 * The same values should also be used to verify that vmcs12 control fields
 * are valid during nested entry from L1 to L2.
 * Each of these control msrs has a low and high 32-bit half: A low bit is on
 * if the corresponding bit in the (32-bit) control field *must* be on, and a
 * bit in the high half is on if the corresponding bit in the control field
 * may be on. See also vmx_control_verify().
 */
2157static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high;
2158static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high;
2159static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
2160static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
2161static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
2162static u32 nested_vmx_misc_low, nested_vmx_misc_high;
2163static u32 nested_vmx_ept_caps;
2164static __init void nested_vmx_setup_ctls_msrs(void)
2165{
	/*
	 * Note that as a general rule, the high half of each MSR (the bits
	 * in the control field which may be 1) should be initialized by the
	 * intersection of the underlying hardware's MSR (i.e., features which
	 * can be supported) and the list of features we want to expose -
	 * because they are known to be properly supported in our code.
	 * Also, usually, the low half of each MSR (the bits which must be 1)
	 * can be set to 0, meaning that L1 may turn off any of these bits.
	 * The reason is that a bit which must be 1 fails vmentry when loaded
	 * with 0, so we have to make sure it is actually supported by the
	 * hardware before exposing it (see also vmx_control_verify()).
	 */

	/* pin-based controls */
2182 rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
2183 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high);
	/*
	 * If bit 55 of VMX_BASIC is off (as it is in our case), bits 1, 2
	 * and 4 of the pin-based controls (i.e., 0x16) must be 1.
	 */
2188 nested_vmx_pinbased_ctls_low |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2189 nested_vmx_pinbased_ctls_high &= PIN_BASED_EXT_INTR_MASK |
2190 PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS |
2191 PIN_BASED_VMX_PREEMPTION_TIMER;
2192 nested_vmx_pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2193
	/*
	 * Exit controls
	 * If bit 55 of VMX_BASIC is off, bits 0-8 and 10, 11, 13, 14, 16 and
	 * 17 must be 1.
	 */
2199 rdmsr(MSR_IA32_VMX_EXIT_CTLS,
2200 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high);
2201 nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
	/* Note that guest use of VM_EXIT_ACK_INTR_ON_EXIT is not supported. */
2203 nested_vmx_exit_ctls_high &=
2204#ifdef CONFIG_X86_64
2205 VM_EXIT_HOST_ADDR_SPACE_SIZE |
2206#endif
2207 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
2208 nested_vmx_exit_ctls_high |= (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
2209 VM_EXIT_LOAD_IA32_EFER);
2210
	/* entry controls */
2212 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
2213 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high);
	/* If bit 55 of VMX_BASIC is off, bits 0-8 and 12 must be 1. */
2215 nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
2216 nested_vmx_entry_ctls_high &=
2217#ifdef CONFIG_X86_64
2218 VM_ENTRY_IA32E_MODE |
2219#endif
2220 VM_ENTRY_LOAD_IA32_PAT;
2221 nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR |
2222 VM_ENTRY_LOAD_IA32_EFER);
2223
	/* cpu-based controls */
2225 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
2226 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high);
2227 nested_vmx_procbased_ctls_low = 0;
2228 nested_vmx_procbased_ctls_high &=
2229 CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_USE_TSC_OFFSETING |
2230 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
2231 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
2232 CPU_BASED_CR3_STORE_EXITING |
2233#ifdef CONFIG_X86_64
2234 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
2235#endif
2236 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
2237 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
2238 CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING |
2239 CPU_BASED_PAUSE_EXITING |
2240 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	/*
	 * We can allow some features even when not supported by the
	 * hardware. For example, L1 can specify an MSR bitmap - and we
	 * can use it to avoid exits to L1 - even when L0 runs L2
	 * without MSR bitmaps.
	 */
2247 nested_vmx_procbased_ctls_high |= CPU_BASED_USE_MSR_BITMAPS;
2248
	/* secondary cpu-based controls */
2250 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
2251 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high);
2252 nested_vmx_secondary_ctls_low = 0;
2253 nested_vmx_secondary_ctls_high &=
2254 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2255 SECONDARY_EXEC_WBINVD_EXITING;
2256
2257 if (enable_ept) {
		/* nested EPT: emulate EPT also to L1 */
2259 nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
2260 nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
2261 VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
2262 nested_vmx_ept_caps &= vmx_capability.ept;
		/*
		 * Since invept is completely emulated we support both global
		 * and context invalidation independent of what host cpu
		 * supports
		 */
2268 nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
2269 VMX_EPT_EXTENT_CONTEXT_BIT;
2270 } else
2271 nested_vmx_ept_caps = 0;
2272
	/* miscellaneous data */
2274 rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high);
2275 nested_vmx_misc_low &= VMX_MISC_PREEMPTION_TIMER_RATE_MASK |
2276 VMX_MISC_SAVE_EFER_LMA;
2277 nested_vmx_misc_high = 0;
2278}
2279
2280static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
2281{
	/*
	 * A control value is valid iff it contains every required bit from
	 * 'low' and no bit outside the allowed-1 set 'high'.
	 */
2285 return ((control & high) | low) == control;
2286}
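
/*
 * For example, with low = 0x16 (bits 1, 2 and 4 must be 1) and high = 0xff
 * (only bits 0-7 may be 1): control = 0x1e is accepted, since
 * ((0x1e & 0xff) | 0x16) == 0x1e; control = 0x116 is rejected because bit 8
 * is outside 'high'; and control = 0x0e is rejected because required bit 4
 * is clear.
 */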
2287
2288static inline u64 vmx_control_msr(u32 low, u32 high)
2289{
2290 return low | ((u64)high << 32);
2291}
2292
/*
 * If we allow our guest to use VMX instructions (i.e., nested VMX), we should
 * also let it use VMX-specific MSRs.
 * vmx_get_vmx_msr() and vmx_set_vmx_msr() return 1 when we handled a
 * VMX-specific MSR, or 0 when we haven't (and the caller should handle it
 * as usual, i.e., completely, or fail).
 */
2300static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2301{
2302 if (!nested_vmx_allowed(vcpu) && msr_index >= MSR_IA32_VMX_BASIC &&
2303 msr_index <= MSR_IA32_VMX_TRUE_ENTRY_CTLS) {
		/*
		 * According to the spec, processors which do not support VMX
		 * should throw a #GP(0) when VMX capability MSRs are read.
		 */
2308 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
2309 return 1;
2310 }
2311
2312 switch (msr_index) {
2313 case MSR_IA32_FEATURE_CONTROL:
2314 if (nested_vmx_allowed(vcpu)) {
2315 *pdata = to_vmx(vcpu)->nested.msr_ia32_feature_control;
2316 break;
2317 }
2318 return 0;
2319 case MSR_IA32_VMX_BASIC:
		/*
		 * This MSR reports some information about VMX support. We
		 * should return information about the VMX we emulate for the
		 * guest, and the VMCS structure we give it - not about the
		 * VMX support of the underlying hardware.
		 */
2326 *pdata = VMCS12_REVISION |
2327 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
2328 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
2329 break;
2330 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
2331 case MSR_IA32_VMX_PINBASED_CTLS:
2332 *pdata = vmx_control_msr(nested_vmx_pinbased_ctls_low,
2333 nested_vmx_pinbased_ctls_high);
2334 break;
2335 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
2336 case MSR_IA32_VMX_PROCBASED_CTLS:
2337 *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low,
2338 nested_vmx_procbased_ctls_high);
2339 break;
2340 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
2341 case MSR_IA32_VMX_EXIT_CTLS:
2342 *pdata = vmx_control_msr(nested_vmx_exit_ctls_low,
2343 nested_vmx_exit_ctls_high);
2344 break;
2345 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
2346 case MSR_IA32_VMX_ENTRY_CTLS:
2347 *pdata = vmx_control_msr(nested_vmx_entry_ctls_low,
2348 nested_vmx_entry_ctls_high);
2349 break;
2350 case MSR_IA32_VMX_MISC:
2351 *pdata = vmx_control_msr(nested_vmx_misc_low,
2352 nested_vmx_misc_high);
2353 break;
	/*
	 * These MSRs specify bits which the guest must keep fixed (on or off)
	 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
	 * We picked the standard core2 setting.
	 */
2359#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
2360#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
2361 case MSR_IA32_VMX_CR0_FIXED0:
2362 *pdata = VMXON_CR0_ALWAYSON;
2363 break;
2364 case MSR_IA32_VMX_CR0_FIXED1:
2365 *pdata = -1ULL;
2366 break;
2367 case MSR_IA32_VMX_CR4_FIXED0:
2368 *pdata = VMXON_CR4_ALWAYSON;
2369 break;
2370 case MSR_IA32_VMX_CR4_FIXED1:
2371 *pdata = -1ULL;
2372 break;
2373 case MSR_IA32_VMX_VMCS_ENUM:
2374 *pdata = 0x1f;
2375 break;
2376 case MSR_IA32_VMX_PROCBASED_CTLS2:
2377 *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low,
2378 nested_vmx_secondary_ctls_high);
2379 break;
2380 case MSR_IA32_VMX_EPT_VPID_CAP:
		/* Currently, no nested vpid support */
2382 *pdata = nested_vmx_ept_caps;
2383 break;
2384 default:
2385 return 0;
2386 }
2387
2388 return 1;
2389}
2390
2391static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2392{
2393 u32 msr_index = msr_info->index;
2394 u64 data = msr_info->data;
2395 bool host_initialized = msr_info->host_initiated;
2396
2397 if (!nested_vmx_allowed(vcpu))
2398 return 0;
2399
2400 if (msr_index == MSR_IA32_FEATURE_CONTROL) {
2401 if (!host_initialized &&
2402 to_vmx(vcpu)->nested.msr_ia32_feature_control
2403 & FEATURE_CONTROL_LOCKED)
2404 return 0;
2405 to_vmx(vcpu)->nested.msr_ia32_feature_control = data;
2406 return 1;
2407 }
2408
	/*
	 * No need to treat VMX capability MSRs specially: If we don't handle
	 * them, handle_wrmsr will #GP(0), which is correct (they are readonly)
	 */
2413 return 0;
2414}
2415
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
2421static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2422{
2423 u64 data;
2424 struct shared_msr_entry *msr;
2425
2426 if (!pdata) {
2427 printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
2428 return -EINVAL;
2429 }
2430
2431 switch (msr_index) {
2432#ifdef CONFIG_X86_64
2433 case MSR_FS_BASE:
2434 data = vmcs_readl(GUEST_FS_BASE);
2435 break;
2436 case MSR_GS_BASE:
2437 data = vmcs_readl(GUEST_GS_BASE);
2438 break;
2439 case MSR_KERNEL_GS_BASE:
2440 vmx_load_host_state(to_vmx(vcpu));
2441 data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
2442 break;
2443#endif
2444 case MSR_EFER:
2445 return kvm_get_msr_common(vcpu, msr_index, pdata);
2446 case MSR_IA32_TSC:
2447 data = guest_read_tsc();
2448 break;
2449 case MSR_IA32_SYSENTER_CS:
2450 data = vmcs_read32(GUEST_SYSENTER_CS);
2451 break;
2452 case MSR_IA32_SYSENTER_EIP:
2453 data = vmcs_readl(GUEST_SYSENTER_EIP);
2454 break;
2455 case MSR_IA32_SYSENTER_ESP:
2456 data = vmcs_readl(GUEST_SYSENTER_ESP);
2457 break;
2458 case MSR_TSC_AUX:
2459 if (!to_vmx(vcpu)->rdtscp_enabled)
2460 return 1;
		/* Otherwise falls through */
2462 default:
2463 if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
2464 return 0;
2465 msr = find_msr_entry(to_vmx(vcpu), msr_index);
2466 if (msr) {
2467 data = msr->data;
2468 break;
2469 }
2470 return kvm_get_msr_common(vcpu, msr_index, pdata);
2471 }
2472
2473 *pdata = data;
2474 return 0;
2475}
2476
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
2482static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2483{
2484 struct vcpu_vmx *vmx = to_vmx(vcpu);
2485 struct shared_msr_entry *msr;
2486 int ret = 0;
2487 u32 msr_index = msr_info->index;
2488 u64 data = msr_info->data;
2489
2490 switch (msr_index) {
2491 case MSR_EFER:
2492 ret = kvm_set_msr_common(vcpu, msr_info);
2493 break;
2494#ifdef CONFIG_X86_64
2495 case MSR_FS_BASE:
2496 vmx_segment_cache_clear(vmx);
2497 vmcs_writel(GUEST_FS_BASE, data);
2498 break;
2499 case MSR_GS_BASE:
2500 vmx_segment_cache_clear(vmx);
2501 vmcs_writel(GUEST_GS_BASE, data);
2502 break;
2503 case MSR_KERNEL_GS_BASE:
2504 vmx_load_host_state(vmx);
2505 vmx->msr_guest_kernel_gs_base = data;
2506 break;
2507#endif
2508 case MSR_IA32_SYSENTER_CS:
2509 vmcs_write32(GUEST_SYSENTER_CS, data);
2510 break;
2511 case MSR_IA32_SYSENTER_EIP:
2512 vmcs_writel(GUEST_SYSENTER_EIP, data);
2513 break;
2514 case MSR_IA32_SYSENTER_ESP:
2515 vmcs_writel(GUEST_SYSENTER_ESP, data);
2516 break;
2517 case MSR_IA32_TSC:
2518 kvm_write_tsc(vcpu, msr_info);
2519 break;
2520 case MSR_IA32_CR_PAT:
2521 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2522 vmcs_write64(GUEST_IA32_PAT, data);
2523 vcpu->arch.pat = data;
2524 break;
2525 }
2526 ret = kvm_set_msr_common(vcpu, msr_info);
2527 break;
2528 case MSR_IA32_TSC_ADJUST:
2529 ret = kvm_set_msr_common(vcpu, msr_info);
2530 break;
2531 case MSR_TSC_AUX:
2532 if (!vmx->rdtscp_enabled)
2533 return 1;
		/* Check reserved bit, higher 32 bits should be zero */
2535 if ((data >> 32) != 0)
2536 return 1;
		/* Otherwise falls through */
2538 default:
2539 if (vmx_set_vmx_msr(vcpu, msr_info))
2540 break;
2541 msr = find_msr_entry(vmx, msr_index);
2542 if (msr) {
2543 msr->data = data;
2544 if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
2545 preempt_disable();
2546 kvm_set_shared_msr(msr->index, msr->data,
2547 msr->mask);
2548 preempt_enable();
2549 }
2550 break;
2551 }
2552 ret = kvm_set_msr_common(vcpu, msr_info);
2553 }
2554
2555 return ret;
2556}
2557
2558static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2559{
2560 __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
2561 switch (reg) {
2562 case VCPU_REGS_RSP:
2563 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
2564 break;
2565 case VCPU_REGS_RIP:
2566 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
2567 break;
2568 case VCPU_EXREG_PDPTR:
2569 if (enable_ept)
2570 ept_save_pdptrs(vcpu);
2571 break;
2572 default:
2573 break;
2574 }
2575}
2576
2577static __init int cpu_has_kvm_support(void)
2578{
2579 return cpu_has_vmx();
2580}
2581
2582static __init int vmx_disabled_by_bios(void)
2583{
2584 u64 msr;
2585
2586 rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
2587 if (msr & FEATURE_CONTROL_LOCKED) {
		/* launched w/ TXT and VMX disabled */
2589 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
2590 && tboot_enabled())
2591 return 1;
		/* launched w/o TXT and VMX only enabled w/ TXT */
2593 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
2594 && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
2595 && !tboot_enabled()) {
2596 printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
2597 "activate TXT before enabling KVM\n");
2598 return 1;
2599 }
		/* launched w/o TXT and VMX disabled */
2601 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
2602 && !tboot_enabled())
2603 return 1;
2604 }
2605
2606 return 0;
2607}
2608
2609static void kvm_cpu_vmxon(u64 addr)
2610{
2611 asm volatile (ASM_VMX_VMXON_RAX
2612 : : "a"(&addr), "m"(addr)
2613 : "memory", "cc");
2614}
2615
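/*
 * Per-cpu VMX enable: requires CR4.VMXE to be clear on entry, sets the
 * lock and VMXON-enable bits in IA32_FEATURE_CONTROL if the BIOS left
 * them unset, then turns on CR4.VMXE (plus VMXON itself when KVM has
 * exclusive use of VMX).
 */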
2616static int hardware_enable(void *garbage)
2617{
2618 int cpu = raw_smp_processor_id();
2619 u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
2620 u64 old, test_bits;
2621
2622 if (read_cr4() & X86_CR4_VMXE)
2623 return -EBUSY;
2624
2625 INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));

	/*
	 * Now we can enable the vmclear operation in kdump
	 * since the loaded_vmcss_on_cpu list on this cpu
	 * has been initialized.
	 *
	 * Though the cpu is not in VMX operation now, there
	 * is no problem to enable the vmclear operation
	 * for the loaded_vmcss_on_cpu list is empty!
	 */
2636 crash_enable_local_vmclear(cpu);
2637
2638 rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
2639
2640 test_bits = FEATURE_CONTROL_LOCKED;
2641 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
2642 if (tboot_enabled())
2643 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
2644
2645 if ((old & test_bits) != test_bits) {
		/* enable and lock */
2647 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
2648 }
2649 write_cr4(read_cr4() | X86_CR4_VMXE);
2650
2651 if (vmm_exclusive) {
2652 kvm_cpu_vmxon(phys_addr);
2653 ept_sync_global();
2654 }
2655
2656 native_store_gdt(&__get_cpu_var(host_gdt));
2657
2658 return 0;
2659}
2660
2661static void vmclear_local_loaded_vmcss(void)
2662{
2663 int cpu = raw_smp_processor_id();
2664 struct loaded_vmcs *v, *n;
2665
2666 list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
2667 loaded_vmcss_on_cpu_link)
2668 __loaded_vmcs_clear(v);
2669}
2670
/* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
 * tricks.
 */
2675static void kvm_cpu_vmxoff(void)
2676{
2677 asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
2678}
2679
2680static void hardware_disable(void *garbage)
2681{
2682 if (vmm_exclusive) {
2683 vmclear_local_loaded_vmcss();
2684 kvm_cpu_vmxoff();
2685 }
2686 write_cr4(read_cr4() & ~X86_CR4_VMXE);
2687}
2688
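/*
 * Reconcile a desired set of VMCS controls with a capability MSR: the
 * MSR's high word has a 1 for every bit that is allowed to be 1, its
 * low word a 1 for every bit that must be 1. Fails with -EIO if any
 * bit in ctl_min cannot be enabled on this CPU.
 */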
2689static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
2690 u32 msr, u32 *result)
2691{
2692 u32 vmx_msr_low, vmx_msr_high;
2693 u32 ctl = ctl_min | ctl_opt;
2694
2695 rdmsr(msr, vmx_msr_low, vmx_msr_high);
2696
	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
2699
	/* Ensure minimum (required) set of control bits are supported. */
2701 if (ctl_min & ~ctl)
2702 return -EIO;
2703
2704 *result = ctl;
2705 return 0;
2706}
2707
2708static __init bool allow_1_setting(u32 msr, u32 ctl)
2709{
2710 u32 vmx_msr_low, vmx_msr_high;
2711
2712 rdmsr(msr, vmx_msr_low, vmx_msr_high);
2713 return vmx_msr_high & ctl;
2714}
2715
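/*
 * Probe the VMX capability MSRs once at module load and record which
 * pin-based, CPU-based, secondary, VM-exit and VM-entry controls the
 * CPU supports, plus the VMCS size and revision from IA32_VMX_BASIC.
 */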
2716static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
2717{
2718 u32 vmx_msr_low, vmx_msr_high;
2719 u32 min, opt, min2, opt2;
2720 u32 _pin_based_exec_control = 0;
2721 u32 _cpu_based_exec_control = 0;
2722 u32 _cpu_based_2nd_exec_control = 0;
2723 u32 _vmexit_control = 0;
2724 u32 _vmentry_control = 0;
2725
2726 min = CPU_BASED_HLT_EXITING |
2727#ifdef CONFIG_X86_64
2728 CPU_BASED_CR8_LOAD_EXITING |
2729 CPU_BASED_CR8_STORE_EXITING |
2730#endif
2731 CPU_BASED_CR3_LOAD_EXITING |
2732 CPU_BASED_CR3_STORE_EXITING |
2733 CPU_BASED_USE_IO_BITMAPS |
2734 CPU_BASED_MOV_DR_EXITING |
2735 CPU_BASED_USE_TSC_OFFSETING |
2736 CPU_BASED_MWAIT_EXITING |
2737 CPU_BASED_MONITOR_EXITING |
2738 CPU_BASED_INVLPG_EXITING |
2739 CPU_BASED_RDPMC_EXITING;
2740
2741 opt = CPU_BASED_TPR_SHADOW |
2742 CPU_BASED_USE_MSR_BITMAPS |
2743 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2744 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
2745 &_cpu_based_exec_control) < 0)
2746 return -EIO;
2747#ifdef CONFIG_X86_64
2748 if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2749 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
2750 ~CPU_BASED_CR8_STORE_EXITING;
2751#endif
2752 if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
2753 min2 = 0;
2754 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2755 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2756 SECONDARY_EXEC_WBINVD_EXITING |
2757 SECONDARY_EXEC_ENABLE_VPID |
2758 SECONDARY_EXEC_ENABLE_EPT |
2759 SECONDARY_EXEC_UNRESTRICTED_GUEST |
2760 SECONDARY_EXEC_PAUSE_LOOP_EXITING |
2761 SECONDARY_EXEC_RDTSCP |
2762 SECONDARY_EXEC_ENABLE_INVPCID |
2763 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2764 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2765 SECONDARY_EXEC_SHADOW_VMCS;
2766 if (adjust_vmx_controls(min2, opt2,
2767 MSR_IA32_VMX_PROCBASED_CTLS2,
2768 &_cpu_based_2nd_exec_control) < 0)
2769 return -EIO;
2770 }
2771#ifndef CONFIG_X86_64
2772 if (!(_cpu_based_2nd_exec_control &
2773 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2774 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
2775#endif
2776
2777 if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2778 _cpu_based_2nd_exec_control &= ~(
2779 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2780 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2781 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
2782
2783 if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
		/* CR3 accesses and invlpg don't need to cause VM Exits when EPT
		   enabled */
2786 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
2787 CPU_BASED_CR3_STORE_EXITING |
2788 CPU_BASED_INVLPG_EXITING);
2789 rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
2790 vmx_capability.ept, vmx_capability.vpid);
2791 }
2792
2793 min = 0;
2794#ifdef CONFIG_X86_64
2795 min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
2796#endif
2797 opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
2798 VM_EXIT_ACK_INTR_ON_EXIT;
2799 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
2800 &_vmexit_control) < 0)
2801 return -EIO;
2802
2803 min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
2804 opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR;
2805 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
2806 &_pin_based_exec_control) < 0)
2807 return -EIO;
2808
2809 if (!(_cpu_based_2nd_exec_control &
2810 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) ||
2811 !(_vmexit_control & VM_EXIT_ACK_INTR_ON_EXIT))
2812 _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
2813
2814 min = 0;
2815 opt = VM_ENTRY_LOAD_IA32_PAT;
2816 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
2817 &_vmentry_control) < 0)
2818 return -EIO;
2819
2820 rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
2821
	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
2823 if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
2824 return -EIO;
2825
2826#ifdef CONFIG_X86_64
	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
2828 if (vmx_msr_high & (1u<<16))
2829 return -EIO;
2830#endif
2831
	/* Require Write-Back (WB) memory type for VMCS accesses. */
2833 if (((vmx_msr_high >> 18) & 15) != 6)
2834 return -EIO;
2835
2836 vmcs_conf->size = vmx_msr_high & 0x1fff;
2837 vmcs_conf->order = get_order(vmcs_config.size);
2838 vmcs_conf->revision_id = vmx_msr_low;
2839
2840 vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
2841 vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
2842 vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
2843 vmcs_conf->vmexit_ctrl = _vmexit_control;
2844 vmcs_conf->vmentry_ctrl = _vmentry_control;
2845
2846 cpu_has_load_ia32_efer =
2847 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
2848 VM_ENTRY_LOAD_IA32_EFER)
2849 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
2850 VM_EXIT_LOAD_IA32_EFER);
2851
2852 cpu_has_load_perf_global_ctrl =
2853 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
2854 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
2855 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
2856 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
2857
	/*
	 * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
	 * but due to errata below it can't be used. Workaround is to use
	 * msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
	 *
	 * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
	 *
	 * AAK155             (model 26)
	 * AAP115             (model 30)
	 * AAT100             (model 37)
	 * BC86,AAY89,BD102   (model 44)
	 * BA97               (model 46)
	 */
2872 if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
2873 switch (boot_cpu_data.x86_model) {
2874 case 26:
2875 case 30:
2876 case 37:
2877 case 44:
2878 case 46:
2879 cpu_has_load_perf_global_ctrl = false;
2880 printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
2881 "does not work properly. Using workaround\n");
2882 break;
2883 default:
2884 break;
2885 }
2886 }
2887
2888 return 0;
2889}
2890
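/*
 * Allocate a VMCS region on the NUMA node of the given cpu and stamp
 * it with the hardware's VMCS revision identifier.
 */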
2891static struct vmcs *alloc_vmcs_cpu(int cpu)
2892{
2893 int node = cpu_to_node(cpu);
2894 struct page *pages;
2895 struct vmcs *vmcs;
2896
2897 pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
2898 if (!pages)
2899 return NULL;
2900 vmcs = page_address(pages);
2901 memset(vmcs, 0, vmcs_config.size);
2902 vmcs->revision_id = vmcs_config.revision_id;
2903 return vmcs;
2904}
2905
2906static struct vmcs *alloc_vmcs(void)
2907{
2908 return alloc_vmcs_cpu(raw_smp_processor_id());
2909}
2910
2911static void free_vmcs(struct vmcs *vmcs)
2912{
2913 free_pages((unsigned long)vmcs, vmcs_config.order);
2914}
2915
/*
 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
 */
2919static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
2920{
2921 if (!loaded_vmcs->vmcs)
2922 return;
2923 loaded_vmcs_clear(loaded_vmcs);
2924 free_vmcs(loaded_vmcs->vmcs);
2925 loaded_vmcs->vmcs = NULL;
2926}
2927
2928static void free_kvm_area(void)
2929{
2930 int cpu;
2931
2932 for_each_possible_cpu(cpu) {
2933 free_vmcs(per_cpu(vmxarea, cpu));
2934 per_cpu(vmxarea, cpu) = NULL;
2935 }
2936}
2937
2938static __init int alloc_kvm_area(void)
2939{
2940 int cpu;
2941
2942 for_each_possible_cpu(cpu) {
2943 struct vmcs *vmcs;
2944
2945 vmcs = alloc_vmcs_cpu(cpu);
2946 if (!vmcs) {
2947 free_kvm_area();
2948 return -ENOMEM;
2949 }
2950
2951 per_cpu(vmxarea, cpu) = vmcs;
2952 }
2953 return 0;
2954}
2955
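/*
 * One-time setup at module load: derive the vmcs configuration, then
 * clamp the module parameters (vpid, ept, unrestricted_guest, apicv,
 * ple, ...) to what the hardware actually supports before allocating
 * the per-cpu VMXON regions.
 */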
2956static __init int hardware_setup(void)
2957{
2958 if (setup_vmcs_config(&vmcs_config) < 0)
2959 return -EIO;
2960
2961 if (boot_cpu_has(X86_FEATURE_NX))
2962 kvm_enable_efer_bits(EFER_NX);
2963
2964 if (!cpu_has_vmx_vpid())
2965 enable_vpid = 0;
2966 if (!cpu_has_vmx_shadow_vmcs())
2967 enable_shadow_vmcs = 0;
2968
2969 if (!cpu_has_vmx_ept() ||
2970 !cpu_has_vmx_ept_4levels()) {
2971 enable_ept = 0;
2972 enable_unrestricted_guest = 0;
2973 enable_ept_ad_bits = 0;
2974 }
2975
2976 if (!cpu_has_vmx_ept_ad_bits())
2977 enable_ept_ad_bits = 0;
2978
2979 if (!cpu_has_vmx_unrestricted_guest())
2980 enable_unrestricted_guest = 0;
2981
2982 if (!cpu_has_vmx_flexpriority())
2983 flexpriority_enabled = 0;
2984
2985 if (!cpu_has_vmx_tpr_shadow())
2986 kvm_x86_ops->update_cr8_intercept = NULL;
2987
2988 if (enable_ept && !cpu_has_vmx_ept_2m_page())
2989 kvm_disable_largepages();
2990
2991 if (!cpu_has_vmx_ple())
2992 ple_gap = 0;
2993
2994 if (!cpu_has_vmx_apicv())
2995 enable_apicv = 0;
2996
2997 if (enable_apicv)
2998 kvm_x86_ops->update_cr8_intercept = NULL;
2999 else {
3000 kvm_x86_ops->hwapic_irr_update = NULL;
3001 kvm_x86_ops->deliver_posted_interrupt = NULL;
3002 kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
3003 }
3004
3005 if (nested)
3006 nested_vmx_setup_ctls_msrs();
3007
3008 return alloc_kvm_area();
3009}
3010
3011static __exit void hardware_unsetup(void)
3012{
3013 free_kvm_area();
3014}
3015
3016static bool emulation_required(struct kvm_vcpu *vcpu)
3017{
3018 return emulate_invalid_guest_state && !guest_state_valid(vcpu);
3019}
3020
3021static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
3022 struct kvm_segment *save)
3023{
3024 if (!emulate_invalid_guest_state) {
		/*
		 * CS and SS RPL should be equal during guest entry according
		 * to VMX spec, but in reality it is not always so. Since vcpu
		 * is in the middle of the transition from real mode to
		 * protected mode it is safe to assume that RPL 0 is a good
		 * default value.
		 */
3032 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
3033 save->selector &= ~SELECTOR_RPL_MASK;
3034 save->dpl = save->selector & SELECTOR_RPL_MASK;
3035 save->s = 1;
3036 }
3037 vmx_set_segment(vcpu, save, seg);
3038}
3039
3040static void enter_pmode(struct kvm_vcpu *vcpu)
3041{
3042 unsigned long flags;
3043 struct vcpu_vmx *vmx = to_vmx(vcpu);
3044
	/*
	 * Update real mode segment cache. It may be not up-to-date if segment
	 * register was written while vcpu was in a guest mode.
	 */
3049 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3050 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3051 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3052 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3053 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3054 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3055
3056 vmx->rmode.vm86_active = 0;
3057
3058 vmx_segment_cache_clear(vmx);
3059
3060 vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3061
3062 flags = vmcs_readl(GUEST_RFLAGS);
3063 flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
3064 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
3065 vmcs_writel(GUEST_RFLAGS, flags);
3066
3067 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
3068 (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
3069
3070 update_exception_bitmap(vcpu);
3071
3072 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3073 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3074 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3075 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3076 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3077 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3078
	/* CPL is always 0 when CPU enters protected mode */
3080 __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
3081 vmx->cpl = 0;
3082}
3083
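/*
 * Force a segment into the shape required by virtual-8086 mode: base
 * derived from the selector (selector = base >> 4), 64KiB limit, DPL 3
 * and a writable, present type - unless invalid guest state is being
 * emulated, in which case the saved state is kept largely as-is.
 */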
3084static void fix_rmode_seg(int seg, struct kvm_segment *save)
3085{
3086 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3087 struct kvm_segment var = *save;
3088
3089 var.dpl = 0x3;
3090 if (seg == VCPU_SREG_CS)
3091 var.type = 0x3;
3092
3093 if (!emulate_invalid_guest_state) {
3094 var.selector = var.base >> 4;
3095 var.base = var.base & 0xffff0;
3096 var.limit = 0xffff;
3097 var.g = 0;
3098 var.db = 0;
3099 var.present = 1;
3100 var.s = 1;
3101 var.l = 0;
3102 var.unusable = 0;
3103 var.type = 0x3;
3104 var.avl = 0;
3105 if (save->base & 0xf)
3106 printk_once(KERN_WARNING "kvm: segment base is not "
3107 "paragraph aligned when entering "
3108 "protected mode (seg=%d)", seg);
3109 }
3110
3111 vmcs_write16(sf->selector, var.selector);
3112 vmcs_write32(sf->base, var.base);
3113 vmcs_write32(sf->limit, var.limit);
3114 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
3115}
3116
3117static void enter_rmode(struct kvm_vcpu *vcpu)
3118{
3119 unsigned long flags;
3120 struct vcpu_vmx *vmx = to_vmx(vcpu);
3121
3122 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3123 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3124 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3125 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3126 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3127 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3128 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3129
3130 vmx->rmode.vm86_active = 1;
3131
	/*
	 * Very old userspace does not call KVM_SET_TSS_ADDR before entering
	 * vcpu. Warn the user that an update is overdue.
	 */
3136 if (!vcpu->kvm->arch.tss_addr)
3137 printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be "
3138 "called before entering vcpu\n");
3139
3140 vmx_segment_cache_clear(vmx);
3141
3142 vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr);
3143 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
3144 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
3145
3146 flags = vmcs_readl(GUEST_RFLAGS);
3147 vmx->rmode.save_rflags = flags;
3148
3149 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
3150
3151 vmcs_writel(GUEST_RFLAGS, flags);
3152 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
3153 update_exception_bitmap(vcpu);
3154
3155 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3156 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3157 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3158 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3159 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3160 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3161
3162 kvm_mmu_reset_context(vcpu);
3163}
3164
3165static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
3166{
3167 struct vcpu_vmx *vmx = to_vmx(vcpu);
3168 struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
3169
3170 if (!msr)
3171 return;
3172
	/*
	 * Force kernel_gs_base reloading before EFER changes, as control
	 * of this msr depends on is_long_mode().
	 */
3177 vmx_load_host_state(to_vmx(vcpu));
3178 vcpu->arch.efer = efer;
3179 if (efer & EFER_LMA) {
3180 vmcs_write32(VM_ENTRY_CONTROLS,
3181 vmcs_read32(VM_ENTRY_CONTROLS) |
3182 VM_ENTRY_IA32E_MODE);
3183 msr->data = efer;
3184 } else {
3185 vmcs_write32(VM_ENTRY_CONTROLS,
3186 vmcs_read32(VM_ENTRY_CONTROLS) &
3187 ~VM_ENTRY_IA32E_MODE);
3188
3189 msr->data = efer & ~EFER_LME;
3190 }
3191 setup_msrs(vmx);
3192}
3193
3194#ifdef CONFIG_X86_64
3195
3196static void enter_lmode(struct kvm_vcpu *vcpu)
3197{
3198 u32 guest_tr_ar;
3199
3200 vmx_segment_cache_clear(to_vmx(vcpu));
3201
3202 guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
3203 if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
3204 pr_debug_ratelimited("%s: tss fixup for long mode. \n",
3205 __func__);
3206 vmcs_write32(GUEST_TR_AR_BYTES,
3207 (guest_tr_ar & ~AR_TYPE_MASK)
3208 | AR_TYPE_BUSY_64_TSS);
3209 }
3210 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
3211}
3212
3213static void exit_lmode(struct kvm_vcpu *vcpu)
3214{
3215 vmcs_write32(VM_ENTRY_CONTROLS,
3216 vmcs_read32(VM_ENTRY_CONTROLS)
3217 & ~VM_ENTRY_IA32E_MODE);
3218 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
3219}
3220
3221#endif
3222
3223static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
3224{
3225 vpid_sync_context(to_vmx(vcpu));
3226 if (enable_ept) {
3227 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3228 return;
3229 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
3230 }
3231}
3232
3233static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
3234{
3235 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
3236
3237 vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
3238 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
3239}
3240
3241static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
3242{
3243 if (enable_ept && is_paging(vcpu))
3244 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3245 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
3246}
3247
3248static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
3249{
3250 ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
3251
3252 vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
3253 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
3254}
3255
3256static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
3257{
3258 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3259
3260 if (!test_bit(VCPU_EXREG_PDPTR,
3261 (unsigned long *)&vcpu->arch.regs_dirty))
3262 return;
3263
3264 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
3265 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
3266 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
3267 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
3268 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
3269 }
3270}
3271
3272static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3273{
3274 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3275
3276 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
3277 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
3278 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
3279 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
3280 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
3281 }
3282
3283 __set_bit(VCPU_EXREG_PDPTR,
3284 (unsigned long *)&vcpu->arch.regs_avail);
3285 __set_bit(VCPU_EXREG_PDPTR,
3286 (unsigned long *)&vcpu->arch.regs_dirty);
3287}
3288
3289static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
3290
3291static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
3292 unsigned long cr0,
3293 struct kvm_vcpu *vcpu)
3294{
3295 if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
3296 vmx_decache_cr3(vcpu);
3297 if (!(cr0 & X86_CR0_PG)) {
		/* From paging/starting to nonpaging */
3299 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
3300 vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
3301 (CPU_BASED_CR3_LOAD_EXITING |
3302 CPU_BASED_CR3_STORE_EXITING));
3303 vcpu->arch.cr0 = cr0;
3304 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3305 } else if (!is_paging(vcpu)) {
		/* From nonpaging to paging */
3307 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
3308 vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
3309 ~(CPU_BASED_CR3_LOAD_EXITING |
3310 CPU_BASED_CR3_STORE_EXITING));
3311 vcpu->arch.cr0 = cr0;
3312 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3313 }
3314
3315 if (!(cr0 & X86_CR0_WP))
3316 *hw_cr0 &= ~X86_CR0_WP;
3317}
3318
3319static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3320{
3321 struct vcpu_vmx *vmx = to_vmx(vcpu);
3322 unsigned long hw_cr0;
3323
3324 hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK);
3325 if (enable_unrestricted_guest)
3326 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
3327 else {
3328 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
3329
3330 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
3331 enter_pmode(vcpu);
3332
3333 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
3334 enter_rmode(vcpu);
3335 }
3336
3337#ifdef CONFIG_X86_64
3338 if (vcpu->arch.efer & EFER_LME) {
3339 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
3340 enter_lmode(vcpu);
3341 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
3342 exit_lmode(vcpu);
3343 }
3344#endif
3345
3346 if (enable_ept)
3347 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
3348
3349 if (!vcpu->fpu_active)
3350 hw_cr0 |= X86_CR0_TS | X86_CR0_MP;
3351
3352 vmcs_writel(CR0_READ_SHADOW, cr0);
3353 vmcs_writel(GUEST_CR0, hw_cr0);
3354 vcpu->arch.cr0 = cr0;
3355
	/* depends on vcpu->arch.cr0 to be set to a new value */
3357 vmx->emulation_required = emulation_required(vcpu);
3358}
3359
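/*
 * Build an EPT pointer for the given root: write-back memory type, a
 * 4-level page walk, optional accessed/dirty bit tracking, and the
 * page-aligned host-physical address of the root table.
 */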
3360static u64 construct_eptp(unsigned long root_hpa)
3361{
3362 u64 eptp;
3363
	/* TODO write the value reading from MSR */
3365 eptp = VMX_EPT_DEFAULT_MT |
3366 VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
3367 if (enable_ept_ad_bits)
3368 eptp |= VMX_EPT_AD_ENABLE_BIT;
3369 eptp |= (root_hpa & PAGE_MASK);
3370
3371 return eptp;
3372}
3373
3374static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
3375{
3376 unsigned long guest_cr3;
3377 u64 eptp;
3378
3379 guest_cr3 = cr3;
3380 if (enable_ept) {
3381 eptp = construct_eptp(cr3);
3382 vmcs_write64(EPT_POINTER, eptp);
3383 guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) :
3384 vcpu->kvm->arch.ept_identity_map_addr;
3385 ept_load_pdptrs(vcpu);
3386 }
3387
3388 vmx_flush_tlb(vcpu);
3389 vmcs_writel(GUEST_CR3, guest_cr3);
3390}
3391
3392static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3393{
3394 unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
3395 KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
3396
3397 if (cr4 & X86_CR4_VMXE) {
		/*
		 * To use VMXON (and later other VMX instructions), a guest
		 * must first be able to turn on cr4.VMXE (see handle_vmon()).
		 * So basically the check on whether to allow nested VMX
		 * is here.
		 */
3404 if (!nested_vmx_allowed(vcpu))
3405 return 1;
3406 }
3407 if (to_vmx(vcpu)->nested.vmxon &&
3408 ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON))
3409 return 1;
3410
3411 vcpu->arch.cr4 = cr4;
3412 if (enable_ept) {
3413 if (!is_paging(vcpu)) {
3414 hw_cr4 &= ~X86_CR4_PAE;
3415 hw_cr4 |= X86_CR4_PSE;
			/*
			 * SMEP is disabled if CPU is in non-paging mode in
			 * hardware. However KVM always uses paging mode to
			 * emulate guest non-paging mode with TDP.
			 * To emulate this behavior, SMEP needs to be manually
			 * disabled when guest switches to non-paging mode.
			 */
3423 hw_cr4 &= ~X86_CR4_SMEP;
3424 } else if (!(cr4 & X86_CR4_PAE)) {
3425 hw_cr4 &= ~X86_CR4_PAE;
3426 }
3427 }
3428
3429 vmcs_writel(CR4_READ_SHADOW, cr4);
3430 vmcs_writel(GUEST_CR4, hw_cr4);
3431 return 0;
3432}
3433
3434static void vmx_get_segment(struct kvm_vcpu *vcpu,
3435 struct kvm_segment *var, int seg)
3436{
3437 struct vcpu_vmx *vmx = to_vmx(vcpu);
3438 u32 ar;
3439
3440 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3441 *var = vmx->rmode.segs[seg];
3442 if (seg == VCPU_SREG_TR
3443 || var->selector == vmx_read_guest_seg_selector(vmx, seg))
3444 return;
3445 var->base = vmx_read_guest_seg_base(vmx, seg);
3446 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3447 return;
3448 }
3449 var->base = vmx_read_guest_seg_base(vmx, seg);
3450 var->limit = vmx_read_guest_seg_limit(vmx, seg);
3451 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3452 ar = vmx_read_guest_seg_ar(vmx, seg);
3453 var->unusable = (ar >> 16) & 1;
3454 var->type = ar & 15;
3455 var->s = (ar >> 4) & 1;
3456 var->dpl = (ar >> 5) & 3;
	/*
	 * Some userspaces do not preserve unusable property. Since usable
	 * segment has to be present according to VMX spec we can use present
	 * property to amend userspace bug by making unusable segment always
	 * nonpresent. vmx_segment_access_rights() already marks nonpresent
	 * segment as unusable.
	 */
3464 var->present = !var->unusable;
3465 var->avl = (ar >> 12) & 1;
3466 var->l = (ar >> 13) & 1;
3467 var->db = (ar >> 14) & 1;
3468 var->g = (ar >> 15) & 1;
3469}
3470
3471static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
3472{
3473 struct kvm_segment s;
3474
3475 if (to_vmx(vcpu)->rmode.vm86_active) {
3476 vmx_get_segment(vcpu, &s, seg);
3477 return s.base;
3478 }
3479 return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
3480}
3481
3482static int vmx_get_cpl(struct kvm_vcpu *vcpu)
3483{
3484 struct vcpu_vmx *vmx = to_vmx(vcpu);
3485
3486 if (!is_protmode(vcpu))
3487 return 0;
3488
3489 if (!is_long_mode(vcpu)
3490 && (kvm_get_rflags(vcpu) & X86_EFLAGS_VM))
3491 return 3;
3492
3493 if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
3494 __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
3495 vmx->cpl = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS) & 3;
3496 }
3497
3498 return vmx->cpl;
3499}
3500
3501
3502static u32 vmx_segment_access_rights(struct kvm_segment *var)
3503{
3504 u32 ar;
3505
3506 if (var->unusable || !var->present)
3507 ar = 1 << 16;
3508 else {
3509 ar = var->type & 15;
3510 ar |= (var->s & 1) << 4;
3511 ar |= (var->dpl & 3) << 5;
3512 ar |= (var->present & 1) << 7;
3513 ar |= (var->avl & 1) << 12;
3514 ar |= (var->l & 1) << 13;
3515 ar |= (var->db & 1) << 14;
3516 ar |= (var->g & 1) << 15;
3517 }
3518
3519 return ar;
3520}
3521
3522static void vmx_set_segment(struct kvm_vcpu *vcpu,
3523 struct kvm_segment *var, int seg)
3524{
3525 struct vcpu_vmx *vmx = to_vmx(vcpu);
3526 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3527
3528 vmx_segment_cache_clear(vmx);
3529 if (seg == VCPU_SREG_CS)
3530 __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
3531
3532 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3533 vmx->rmode.segs[seg] = *var;
3534 if (seg == VCPU_SREG_TR)
3535 vmcs_write16(sf->selector, var->selector);
3536 else if (var->s)
3537 fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
3538 goto out;
3539 }
3540
3541 vmcs_writel(sf->base, var->base);
3542 vmcs_write32(sf->limit, var->limit);
3543 vmcs_write16(sf->selector, var->selector);

	/*
	 *   Fix the "Accessed" bit in AR field of segment registers for older
	 * qemu binaries.
	 *   IA32 arch specifies that at the time of processor reset the
	 * "Accessed" bit in the AR field of segment registers is 1. And qemu
	 * doesn't set this bit to 1 every time it writes to segment
	 * registers, leading to a VMEntry failure once loaded into the
	 * kernel with the VMX feature turned off.
	 */
3556 if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
		var->type |= 0x1; /* Accessed */
3558
3559 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
3560
3561out:
3562 vmx->emulation_required |= emulation_required(vcpu);
3563}
3564
3565static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3566{
3567 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
3568
3569 *db = (ar >> 14) & 1;
3570 *l = (ar >> 13) & 1;
3571}
3572
3573static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3574{
3575 dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
3576 dt->address = vmcs_readl(GUEST_IDTR_BASE);
3577}
3578
3579static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3580{
3581 vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
3582 vmcs_writel(GUEST_IDTR_BASE, dt->address);
3583}
3584
3585static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3586{
3587 dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
3588 dt->address = vmcs_readl(GUEST_GDTR_BASE);
3589}
3590
3591static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3592{
3593 vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
3594 vmcs_writel(GUEST_GDTR_BASE, dt->address);
3595}
3596
3597static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
3598{
3599 struct kvm_segment var;
3600 u32 ar;
3601
3602 vmx_get_segment(vcpu, &var, seg);
3603 var.dpl = 0x3;
3604 if (seg == VCPU_SREG_CS)
3605 var.type = 0x3;
3606 ar = vmx_segment_access_rights(&var);
3607
3608 if (var.base != (var.selector << 4))
3609 return false;
3610 if (var.limit != 0xffff)
3611 return false;
3612 if (ar != 0xf3)
3613 return false;
3614
3615 return true;
3616}
3617
3618static bool code_segment_valid(struct kvm_vcpu *vcpu)
3619{
3620 struct kvm_segment cs;
3621 unsigned int cs_rpl;
3622
3623 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3624 cs_rpl = cs.selector & SELECTOR_RPL_MASK;
3625
3626 if (cs.unusable)
3627 return false;
3628 if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
3629 return false;
3630 if (!cs.s)
3631 return false;
3632 if (cs.type & AR_TYPE_WRITEABLE_MASK) {
3633 if (cs.dpl > cs_rpl)
3634 return false;
3635 } else {
3636 if (cs.dpl != cs_rpl)
3637 return false;
3638 }
3639 if (!cs.present)
3640 return false;
3641
	/* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
3643 return true;
3644}
3645
3646static bool stack_segment_valid(struct kvm_vcpu *vcpu)
3647{
3648 struct kvm_segment ss;
3649 unsigned int ss_rpl;
3650
3651 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3652 ss_rpl = ss.selector & SELECTOR_RPL_MASK;
3653
3654 if (ss.unusable)
3655 return true;
3656 if (ss.type != 3 && ss.type != 7)
3657 return false;
3658 if (!ss.s)
3659 return false;
3660 if (ss.dpl != ss_rpl)
3661 return false;
3662 if (!ss.present)
3663 return false;
3664
3665 return true;
3666}
3667
3668static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
3669{
3670 struct kvm_segment var;
3671 unsigned int rpl;
3672
3673 vmx_get_segment(vcpu, &var, seg);
3674 rpl = var.selector & SELECTOR_RPL_MASK;
3675
3676 if (var.unusable)
3677 return true;
3678 if (!var.s)
3679 return false;
3680 if (!var.present)
3681 return false;
3682 if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
3683 if (var.dpl < rpl)
3684 return false;
3685 }
3686
	/* TODO: Add other members to kvm_segment_field to allow checking for other access
	 * rights flags
	 */
3690 return true;
3691}
3692
3693static bool tr_valid(struct kvm_vcpu *vcpu)
3694{
3695 struct kvm_segment tr;
3696
3697 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
3698
3699 if (tr.unusable)
3700 return false;
3701 if (tr.selector & SELECTOR_TI_MASK)
3702 return false;
3703 if (tr.type != 3 && tr.type != 11)
3704 return false;
3705 if (!tr.present)
3706 return false;
3707
3708 return true;
3709}
3710
3711static bool ldtr_valid(struct kvm_vcpu *vcpu)
3712{
3713 struct kvm_segment ldtr;
3714
3715 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
3716
3717 if (ldtr.unusable)
3718 return true;
3719 if (ldtr.selector & SELECTOR_TI_MASK)
3720 return false;
3721 if (ldtr.type != 2)
3722 return false;
3723 if (!ldtr.present)
3724 return false;
3725
3726 return true;
3727}
3728
3729static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
3730{
3731 struct kvm_segment cs, ss;
3732
3733 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3734 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3735
3736 return ((cs.selector & SELECTOR_RPL_MASK) ==
3737 (ss.selector & SELECTOR_RPL_MASK));
3738}
3739
/*
 * Check if guest state is valid. Returns true if valid, false if
 * not.
 * We assume that registers are always usable
 */
3745static bool guest_state_valid(struct kvm_vcpu *vcpu)
3746{
3747 if (enable_unrestricted_guest)
3748 return true;
3749
		/* real mode guest state checks */
3751 if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
3752 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
3753 return false;
3754 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
3755 return false;
3756 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
3757 return false;
3758 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
3759 return false;
3760 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
3761 return false;
3762 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
3763 return false;
3764 } else {
		/* protected mode guest state checks */
3766 if (!cs_ss_rpl_check(vcpu))
3767 return false;
3768 if (!code_segment_valid(vcpu))
3769 return false;
3770 if (!stack_segment_valid(vcpu))
3771 return false;
3772 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
3773 return false;
3774 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
3775 return false;
3776 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
3777 return false;
3778 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
3779 return false;
3780 if (!tr_valid(vcpu))
3781 return false;
3782 if (!ldtr_valid(vcpu))
3783 return false;
3784 }
3785
	/* TODO:
	 * - Add checks on RIP
	 * - Add checks on RFLAGS
	 */
3790 return true;
3791}
3792
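/*
 * Initialize the real-mode TSS pages in guest memory: clear them,
 * place the I/O permission bitmap base past the TSS proper, and write
 * the terminating 0xff byte at the end of the bitmap area.
 */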
3793static int init_rmode_tss(struct kvm *kvm)
3794{
3795 gfn_t fn;
3796 u16 data = 0;
3797 int r, idx, ret = 0;
3798
3799 idx = srcu_read_lock(&kvm->srcu);
3800 fn = kvm->arch.tss_addr >> PAGE_SHIFT;
3801 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
3802 if (r < 0)
3803 goto out;
3804 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
3805 r = kvm_write_guest_page(kvm, fn++, &data,
3806 TSS_IOPB_BASE_OFFSET, sizeof(u16));
3807 if (r < 0)
3808 goto out;
3809 r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
3810 if (r < 0)
3811 goto out;
3812 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
3813 if (r < 0)
3814 goto out;
3815 data = ~0;
3816 r = kvm_write_guest_page(kvm, fn, &data,
3817 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
3818 sizeof(u8));
3819 if (r < 0)
3820 goto out;
3821
3822 ret = 1;
3823out:
3824 srcu_read_unlock(&kvm->srcu, idx);
3825 return ret;
3826}
3827
3828static int init_rmode_identity_map(struct kvm *kvm)
3829{
3830 int i, idx, r, ret;
3831 pfn_t identity_map_pfn;
3832 u32 tmp;
3833
3834 if (!enable_ept)
3835 return 1;
3836 if (unlikely(!kvm->arch.ept_identity_pagetable)) {
3837 printk(KERN_ERR "EPT: identity-mapping pagetable "
3838 "haven't been allocated!\n");
3839 return 0;
3840 }
3841 if (likely(kvm->arch.ept_identity_pagetable_done))
3842 return 1;
3843 ret = 0;
3844 identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
3845 idx = srcu_read_lock(&kvm->srcu);
3846 r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
3847 if (r < 0)
3848 goto out;
3849
3850 for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
3851 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
3852 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
3853 r = kvm_write_guest_page(kvm, identity_map_pfn,
3854 &tmp, i * sizeof(tmp), sizeof(tmp));
3855 if (r < 0)
3856 goto out;
3857 }
3858 kvm->arch.ept_identity_pagetable_done = true;
3859 ret = 1;
3860out:
3861 srcu_read_unlock(&kvm->srcu, idx);
3862 return ret;
3863}
3864
3865static void seg_setup(int seg)
3866{
3867 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3868 unsigned int ar;
3869
3870 vmcs_write16(sf->selector, 0);
3871 vmcs_writel(sf->base, 0);
3872 vmcs_write32(sf->limit, 0xffff);
3873 ar = 0x93;
3874 if (seg == VCPU_SREG_CS)
3875 ar |= 0x08;
3876
3877 vmcs_write32(sf->ar_bytes, ar);
3878}
3879
3880static int alloc_apic_access_page(struct kvm *kvm)
3881{
3882 struct page *page;
3883 struct kvm_userspace_memory_region kvm_userspace_mem;
3884 int r = 0;
3885
3886 mutex_lock(&kvm->slots_lock);
3887 if (kvm->arch.apic_access_page)
3888 goto out;
3889 kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
3890 kvm_userspace_mem.flags = 0;
3891 kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
3892 kvm_userspace_mem.memory_size = PAGE_SIZE;
3893 r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
3894 if (r)
3895 goto out;
3896
3897 page = gfn_to_page(kvm, 0xfee00);
3898 if (is_error_page(page)) {
3899 r = -EFAULT;
3900 goto out;
3901 }
3902
3903 kvm->arch.apic_access_page = page;
3904out:
3905 mutex_unlock(&kvm->slots_lock);
3906 return r;
3907}
3908
3909static int alloc_identity_pagetable(struct kvm *kvm)
3910{
3911 struct page *page;
3912 struct kvm_userspace_memory_region kvm_userspace_mem;
3913 int r = 0;
3914
3915 mutex_lock(&kvm->slots_lock);
3916 if (kvm->arch.ept_identity_pagetable)
3917 goto out;
3918 kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
3919 kvm_userspace_mem.flags = 0;
3920 kvm_userspace_mem.guest_phys_addr =
3921 kvm->arch.ept_identity_map_addr;
3922 kvm_userspace_mem.memory_size = PAGE_SIZE;
3923 r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
3924 if (r)
3925 goto out;
3926
3927 page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
3928 if (is_error_page(page)) {
3929 r = -EFAULT;
3930 goto out;
3931 }
3932
3933 kvm->arch.ept_identity_pagetable = page;
3934out:
3935 mutex_unlock(&kvm->slots_lock);
3936 return r;
3937}
3938
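/*
 * VPIDs are a global resource. Hand out the lowest free one; vpid 0
 * means "no VPID" (shared with the host) and is the fallback when the
 * pool is exhausted or VPID is disabled.
 */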
3939static void allocate_vpid(struct vcpu_vmx *vmx)
3940{
3941 int vpid;
3942
3943 vmx->vpid = 0;
3944 if (!enable_vpid)
3945 return;
3946 spin_lock(&vmx_vpid_lock);
3947 vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
3948 if (vpid < VMX_NR_VPIDS) {
3949 vmx->vpid = vpid;
3950 __set_bit(vpid, vmx_vpid_bitmap);
3951 }
3952 spin_unlock(&vmx_vpid_lock);
3953}
3954
3955static void free_vpid(struct vcpu_vmx *vmx)
3956{
3957 if (!enable_vpid)
3958 return;
3959 spin_lock(&vmx_vpid_lock);
3960 if (vmx->vpid != 0)
3961 __clear_bit(vmx->vpid, vmx_vpid_bitmap);
3962 spin_unlock(&vmx_vpid_lock);
3963}
3964
3965#define MSR_TYPE_R 1
3966#define MSR_TYPE_W 2
3967static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
3968 u32 msr, int type)
3969{
3970 int f = sizeof(unsigned long);
3971
3972 if (!cpu_has_vmx_msr_bitmap())
3973 return;
3974
	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way round.
	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R)
			/* read-low */
			__clear_bit(msr, msr_bitmap + 0x000 / f);

		if (type & MSR_TYPE_W)
			/* write-low */
			__clear_bit(msr, msr_bitmap + 0x800 / f);

	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		if (type & MSR_TYPE_R)
			/* read-high */
			__clear_bit(msr, msr_bitmap + 0x400 / f);

		if (type & MSR_TYPE_W)
			/* write-high */
			__clear_bit(msr, msr_bitmap + 0xc00 / f);

	}
4000}
4001
4002static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
4003 u32 msr, int type)
4004{
4005 int f = sizeof(unsigned long);
4006
4007 if (!cpu_has_vmx_msr_bitmap())
4008 return;
4009
	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way round.
	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R)
			/* read-low */
			__set_bit(msr, msr_bitmap + 0x000 / f);

		if (type & MSR_TYPE_W)
			/* write-low */
			__set_bit(msr, msr_bitmap + 0x800 / f);

	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		if (type & MSR_TYPE_R)
			/* read-high */
			__set_bit(msr, msr_bitmap + 0x400 / f);

		if (type & MSR_TYPE_W)
			/* write-high */
			__set_bit(msr, msr_bitmap + 0xc00 / f);

	}
4035}
4036
4037static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
4038{
4039 if (!longmode_only)
4040 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy,
4041 msr, MSR_TYPE_R | MSR_TYPE_W);
4042 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode,
4043 msr, MSR_TYPE_R | MSR_TYPE_W);
4044}
4045
4046static void vmx_enable_intercept_msr_read_x2apic(u32 msr)
4047{
4048 __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
4049 msr, MSR_TYPE_R);
4050 __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
4051 msr, MSR_TYPE_R);
4052}
4053
4054static void vmx_disable_intercept_msr_read_x2apic(u32 msr)
4055{
4056 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
4057 msr, MSR_TYPE_R);
4058 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
4059 msr, MSR_TYPE_R);
4060}
4061
4062static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
4063{
4064 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
4065 msr, MSR_TYPE_W);
4066 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
4067 msr, MSR_TYPE_W);
4068}
4069
4070static int vmx_vm_has_apicv(struct kvm *kvm)
4071{
4072 return enable_apicv && irqchip_in_kernel(kvm);
4073}
4074
/*
 * Send interrupt to vcpu via posted interrupt way.
 * 1. If target vcpu is running(non-root mode), send posted interrupt
 * notification to vcpu and hardware will sync PIR to vIRR atomically.
 * 2. If target vcpu isn't running(root mode), kick it to pick up the
 * interrupt from PIR in next vmentry.
 */
4082static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
4083{
4084 struct vcpu_vmx *vmx = to_vmx(vcpu);
4085 int r;
4086
4087 if (pi_test_and_set_pir(vector, &vmx->pi_desc))
4088 return;
4089
4090 r = pi_test_and_set_on(&vmx->pi_desc);
4091 kvm_make_request(KVM_REQ_EVENT, vcpu);
4092#ifdef CONFIG_SMP
4093 if (!r && (vcpu->mode == IN_GUEST_MODE))
4094 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
4095 POSTED_INTR_VECTOR);
4096 else
4097#endif
4098 kvm_vcpu_kick(vcpu);
4099}
4100
4101static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
4102{
4103 struct vcpu_vmx *vmx = to_vmx(vcpu);
4104
4105 if (!pi_test_and_clear_on(&vmx->pi_desc))
4106 return;
4107
4108 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
4109}
4110
4111static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu)
4112{
4113 return;
4114}
4115
/*
 * Set up the vmcs's constant host-state fields, i.e., host-state fields that
 * will not change in the lifetime of the guest.
 * Note that host-state that does change is set elsewhere. E.g., host-state
 * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
 */
4122static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
4123{
4124 u32 low32, high32;
4125 unsigned long tmpl;
4126 struct desc_ptr dt;
4127
4128 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);
4129 vmcs_writel(HOST_CR4, read_cr4());
4130 vmcs_writel(HOST_CR3, read_cr3());
4131
4132 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);
4133#ifdef CONFIG_X86_64
	/*
	 * Load null selectors, so we can avoid reloading them in
	 * __vmx_load_host_state(), in case userspace uses the null selectors
	 * too (the expected case).
	 */
4139 vmcs_write16(HOST_DS_SELECTOR, 0);
4140 vmcs_write16(HOST_ES_SELECTOR, 0);
4141#else
4142 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);
4143 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);
4144#endif
4145 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);
4146 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);
4147
4148 native_store_idt(&dt);
4149 vmcs_writel(HOST_IDTR_BASE, dt.address);
4150 vmx->host_idt_base = dt.address;
4151
4152 vmcs_writel(HOST_RIP, vmx_return);
4153
4154 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
4155 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
4156 rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
4157 vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);
4158
4159 if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
4160 rdmsr(MSR_IA32_CR_PAT, low32, high32);
4161 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
4162 }
4163}
4164
4165static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
4166{
4167 vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
4168 if (enable_ept)
4169 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
4170 if (is_guest_mode(&vmx->vcpu))
4171 vmx->vcpu.arch.cr4_guest_owned_bits &=
4172 ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
4173 vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
4174}
4175
4176static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
4177{
4178 u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
4179
4180 if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
4181 pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
4182 return pin_based_exec_ctrl;
4183}
4184
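/*
 * Start from the probed CPU-based controls and adapt them to this VM:
 * without an in-kernel TPR shadow fall back to CR8 exiting on x86-64,
 * and without EPT force CR3 and INVLPG exiting so KVM can shadow the
 * guest page tables.
 */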
4185static u32 vmx_exec_control(struct vcpu_vmx *vmx)
4186{
4187 u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
4188 if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
4189 exec_control &= ~CPU_BASED_TPR_SHADOW;
4190#ifdef CONFIG_X86_64
4191 exec_control |= CPU_BASED_CR8_STORE_EXITING |
4192 CPU_BASED_CR8_LOAD_EXITING;
4193#endif
4194 }
4195 if (!enable_ept)
4196 exec_control |= CPU_BASED_CR3_STORE_EXITING |
4197 CPU_BASED_CR3_LOAD_EXITING |
4198 CPU_BASED_INVLPG_EXITING;
4199 return exec_control;
4200}
4201
4202static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
4203{
4204 u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
4205 if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
4206 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
4207 if (vmx->vpid == 0)
4208 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
4209 if (!enable_ept) {
4210 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
4211 enable_unrestricted_guest = 0;
		/* Enable INVPCID for non-ept guests may make performance worse */
4213 exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
4214 }
4215 if (!enable_unrestricted_guest)
4216 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
4217 if (!ple_gap)
4218 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
4219 if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
4220 exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
4221 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4222 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
	/* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
	   (handle_vmptrld).
	   We can NOT enable shadow_vmcs here because we don't have yet
	   a current VMCS12
	*/
4228 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
4229 return exec_control;
4230}
4231
4232static void ept_set_mmio_spte_mask(void)
4233{
	/*
	 * EPT Misconfigurations can be generated if the value of bits 2:0
	 * of an EPT paging-structure entry is 110b (write/execute).
	 * Also, magic bits (0x3ull << 62) is set to quickly identify mmio
	 * spte.
	 */
4240 kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull);
4241}
4242
/*
 * Sets up the vmcs for emulated real mode.
 */
4246static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
4247{
4248#ifdef CONFIG_X86_64
4249 unsigned long a;
4250#endif
4251 int i;
4252
	/* I/O */
4254 vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
4255 vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
4256
4257 if (enable_shadow_vmcs) {
4258 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
4259 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
4260 }
4261 if (cpu_has_vmx_msr_bitmap())
4262 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
4263
4264 vmcs_write64(VMCS_LINK_POINTER, -1ull);
4265
	/* Control */
4267 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
4268
4269 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
4270
4271 if (cpu_has_secondary_exec_ctrls()) {
4272 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
4273 vmx_secondary_exec_control(vmx));
4274 }
4275
4276 if (vmx_vm_has_apicv(vmx->vcpu.kvm)) {
4277 vmcs_write64(EOI_EXIT_BITMAP0, 0);
4278 vmcs_write64(EOI_EXIT_BITMAP1, 0);
4279 vmcs_write64(EOI_EXIT_BITMAP2, 0);
4280 vmcs_write64(EOI_EXIT_BITMAP3, 0);
4281
4282 vmcs_write16(GUEST_INTR_STATUS, 0);
4283
4284 vmcs_write64(POSTED_INTR_NV, POSTED_INTR_VECTOR);
4285 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
4286 }
4287
4288 if (ple_gap) {
4289 vmcs_write32(PLE_GAP, ple_gap);
4290 vmcs_write32(PLE_WINDOW, ple_window);
4291 }
4292
4293 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
4294 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
4295 vmcs_write32(CR3_TARGET_COUNT, 0);
4296
4297 vmcs_write16(HOST_FS_SELECTOR, 0);
4298 vmcs_write16(HOST_GS_SELECTOR, 0);
4299 vmx_set_constant_host_state(vmx);
4300#ifdef CONFIG_X86_64
4301 rdmsrl(MSR_FS_BASE, a);
4302 vmcs_writel(HOST_FS_BASE, a);
4303 rdmsrl(MSR_GS_BASE, a);
4304 vmcs_writel(HOST_GS_BASE, a);
4305#else
4306 vmcs_writel(HOST_FS_BASE, 0);
4307 vmcs_writel(HOST_GS_BASE, 0);
4308#endif
4309
4310 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
4311 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
4312 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
4313 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
4314 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
4315
4316 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
4317 u32 msr_low, msr_high;
4318 u64 host_pat;
4319 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
4320 host_pat = msr_low | ((u64) msr_high << 32);
		/* Write the default guest PAT, following the host PAT */
4322 vmcs_write64(GUEST_IA32_PAT, host_pat);
		/* Keep arch.pat in sync with GUEST_IA32_PAT */
4324 vmx->vcpu.arch.pat = host_pat;
4325 }
4326
4327 for (i = 0; i < NR_VMX_MSR; ++i) {
4328 u32 index = vmx_msr_index[i];
4329 u32 data_low, data_high;
4330 int j = vmx->nmsrs;
4331
4332 if (rdmsr_safe(index, &data_low, &data_high) < 0)
4333 continue;
4334 if (wrmsr_safe(index, data_low, data_high) < 0)
4335 continue;
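		/*
		 * Note: .index stores i, the index into vmx_msr_index[],
		 * not the MSR number itself.
		 */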
4336 vmx->guest_msrs[j].index = i;
4337 vmx->guest_msrs[j].data = 0;
4338 vmx->guest_msrs[j].mask = -1ull;
4339 ++vmx->nmsrs;
4340 }
4341
4342 vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
4343
4344
4345 vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
4346
4347 vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
4348 set_cr4_guest_host_mask(vmx);
4349
4350 return 0;
4351}
4352
4353static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
4354{
4355 struct vcpu_vmx *vmx = to_vmx(vcpu);
4356 u64 msr;
4357
4358 vmx->rmode.vm86_active = 0;
4359
4360 vmx->soft_vnmi_blocked = 0;
4361
4362 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
4363 kvm_set_cr8(&vmx->vcpu, 0);
4364 msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
4365 if (kvm_vcpu_is_bsp(&vmx->vcpu))
4366 msr |= MSR_IA32_APICBASE_BSP;
4367 kvm_set_apic_base(&vmx->vcpu, msr);
4368
4369 vmx_segment_cache_clear(vmx);
4370
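	/*
	 * Real-mode reset-like state: CS.sel = 0xf000 with base 0xffff0000,
	 * so together with RIP = 0xfff0 (written below) the first fetch is
	 * at physical 0xfffffff0.
	 */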
4371 seg_setup(VCPU_SREG_CS);
4372 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
4373 vmcs_write32(GUEST_CS_BASE, 0xffff0000);
4374
4375 seg_setup(VCPU_SREG_DS);
4376 seg_setup(VCPU_SREG_ES);
4377 seg_setup(VCPU_SREG_FS);
4378 seg_setup(VCPU_SREG_GS);
4379 seg_setup(VCPU_SREG_SS);
4380
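	/* TR access rights 0x008b: present, type 11 (32-bit busy TSS). */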
4381 vmcs_write16(GUEST_TR_SELECTOR, 0);
4382 vmcs_writel(GUEST_TR_BASE, 0);
4383 vmcs_write32(GUEST_TR_LIMIT, 0xffff);
4384 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
4385
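	/* LDTR access rights 0x00082: present, type 2 (LDT). */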
4386 vmcs_write16(GUEST_LDTR_SELECTOR, 0);
4387 vmcs_writel(GUEST_LDTR_BASE, 0);
4388 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
4389 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
4390
4391 vmcs_write32(GUEST_SYSENTER_CS, 0);
4392 vmcs_writel(GUEST_SYSENTER_ESP, 0);
4393 vmcs_writel(GUEST_SYSENTER_EIP, 0);
4394
4395 vmcs_writel(GUEST_RFLAGS, 0x02);
4396 kvm_rip_write(vcpu, 0xfff0);
4397
4398 vmcs_writel(GUEST_GDTR_BASE, 0);
4399 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
4400
4401 vmcs_writel(GUEST_IDTR_BASE, 0);
4402 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
4403
4404 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
4405 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
4406 vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
4407
4408
4409 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4410
4411 setup_msrs(vmx);
4412
4413 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
4414
4415 if (cpu_has_vmx_tpr_shadow()) {
4416 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
4417 if (vm_need_tpr_shadow(vmx->vcpu.kvm))
4418 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
4419 __pa(vmx->vcpu.arch.apic->regs));
4420 vmcs_write32(TPR_THRESHOLD, 0);
4421 }
4422
4423 if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
4424 vmcs_write64(APIC_ACCESS_ADDR,
4425 page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
4426
4427 if (vmx_vm_has_apicv(vcpu->kvm))
4428 memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
4429
4430 if (vmx->vpid != 0)
4431 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
4432
4433 vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
4434 vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu));
4435 vmx_set_cr4(&vmx->vcpu, 0);
4436 vmx_set_efer(&vmx->vcpu, 0);
4437 vmx_fpu_activate(&vmx->vcpu);
4438 update_exception_bitmap(&vmx->vcpu);
4439
4440 vpid_sync_context(vmx);
4441}
4442
/*
 * In nested virtualization, check if L1 asked to exit on external interrupts.
 * For most existing hypervisors, this will always return true.
 */
4447static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
4448{
4449 return get_vmcs12(vcpu)->pin_based_vm_exec_control &
4450 PIN_BASED_EXT_INTR_MASK;
4451}
4452
4453static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
4454{
4455 return get_vmcs12(vcpu)->pin_based_vm_exec_control &
4456 PIN_BASED_NMI_EXITING;
4457}
4458
4459static int enable_irq_window(struct kvm_vcpu *vcpu)
4460{
4461 u32 cpu_based_vm_exec_control;
4462
4463 if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
		/*
		 * We get here if vmx_interrupt_allowed() said we can't
		 * inject to L1 now because L2 must run. Ask L2 to exit
		 * right after entry, so we can inject to L1 more promptly.
		 */
4470 return -EBUSY;
4471
4472 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
4473 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
4474 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
4475 return 0;
4476}
4477
4478static int enable_nmi_window(struct kvm_vcpu *vcpu)
4479{
4480 u32 cpu_based_vm_exec_control;
4481
4482 if (!cpu_has_virtual_nmis())
4483 return enable_irq_window(vcpu);
4484
4485 if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI)
4486 return enable_irq_window(vcpu);
4487
4488 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
4489 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
4490 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
4491 return 0;
4492}
4493
4494static void vmx_inject_irq(struct kvm_vcpu *vcpu)
4495{
4496 struct vcpu_vmx *vmx = to_vmx(vcpu);
4497 uint32_t intr;
4498 int irq = vcpu->arch.interrupt.nr;
4499
4500 trace_kvm_inj_virq(irq);
4501
4502 ++vcpu->stat.irq_injections;
4503 if (vmx->rmode.vm86_active) {
4504 int inc_eip = 0;
4505 if (vcpu->arch.interrupt.soft)
4506 inc_eip = vcpu->arch.event_exit_inst_len;
4507 if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
4508 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4509 return;
4510 }
4511 intr = irq | INTR_INFO_VALID_MASK;
4512 if (vcpu->arch.interrupt.soft) {
4513 intr |= INTR_TYPE_SOFT_INTR;
4514 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
4515 vmx->vcpu.arch.event_exit_inst_len);
4516 } else
4517 intr |= INTR_TYPE_EXT_INTR;
4518 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
4519}
4520
4521static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
4522{
4523 struct vcpu_vmx *vmx = to_vmx(vcpu);
4524
4525 if (is_guest_mode(vcpu))
4526 return;
4527
4528 if (!cpu_has_virtual_nmis()) {
		/*
		 * Tracking the NMI-blocked state in software is built upon
		 * finding the next open IRQ window. This, in turn, depends on
		 * well-behaving guests: they have to keep IRQs disabled at
		 * least as long as the NMI handler runs. Otherwise we may
		 * cause NMI nesting, maybe breaking the guest. But as this is
		 * highly unlikely, we can live with the residual risk.
		 */
4537 vmx->soft_vnmi_blocked = 1;
4538 vmx->vnmi_blocked_time = 0;
4539 }
4540
4541 ++vcpu->stat.nmi_injections;
4542 vmx->nmi_known_unmasked = false;
4543 if (vmx->rmode.vm86_active) {
4544 if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
4545 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4546 return;
4547 }
4548 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
4549 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
4550}
4551
4552static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
4553{
4554 if (!cpu_has_virtual_nmis())
4555 return to_vmx(vcpu)->soft_vnmi_blocked;
4556 if (to_vmx(vcpu)->nmi_known_unmasked)
4557 return false;
4558 return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
4559}
4560
4561static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
4562{
4563 struct vcpu_vmx *vmx = to_vmx(vcpu);
4564
4565 if (!cpu_has_virtual_nmis()) {
4566 if (vmx->soft_vnmi_blocked != masked) {
4567 vmx->soft_vnmi_blocked = masked;
4568 vmx->vnmi_blocked_time = 0;
4569 }
4570 } else {
4571 vmx->nmi_known_unmasked = !masked;
4572 if (masked)
4573 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
4574 GUEST_INTR_STATE_NMI);
4575 else
4576 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
4577 GUEST_INTR_STATE_NMI);
4578 }
4579}
4580
4581static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
4582{
4583 if (is_guest_mode(vcpu)) {
4584 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4585
4586 if (to_vmx(vcpu)->nested.nested_run_pending)
4587 return 0;
4588 if (nested_exit_on_nmi(vcpu)) {
4589 nested_vmx_vmexit(vcpu);
4590 vmcs12->vm_exit_reason = EXIT_REASON_EXCEPTION_NMI;
4591 vmcs12->vm_exit_intr_info = NMI_VECTOR |
4592 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK;
			/*
			 * The NMI-triggered VM exit counts as injection:
			 * clear this one and block further NMIs.
			 */
4597 vcpu->arch.nmi_pending = 0;
4598 vmx_set_nmi_mask(vcpu, true);
4599 return 0;
4600 }
4601 }
4602
4603 if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
4604 return 0;
4605
4606 return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
4607 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
4608 | GUEST_INTR_STATE_NMI));
4609}
4610
4611static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
4612{
4613 if (is_guest_mode(vcpu)) {
4614 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4615
4616 if (to_vmx(vcpu)->nested.nested_run_pending)
4617 return 0;
4618 if (nested_exit_on_intr(vcpu)) {
4619 nested_vmx_vmexit(vcpu);
4620 vmcs12->vm_exit_reason =
4621 EXIT_REASON_EXTERNAL_INTERRUPT;
4622 vmcs12->vm_exit_intr_info = 0;
			/*
			 * Fall through to normal code, but now in L1, not L2.
			 */
4626 }
4627 }
4628
4629 return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
4630 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
4631 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
4632}
4633
4634static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
4635{
4636 int ret;
4637 struct kvm_userspace_memory_region tss_mem = {
4638 .slot = TSS_PRIVATE_MEMSLOT,
4639 .guest_phys_addr = addr,
4640 .memory_size = PAGE_SIZE * 3,
4641 .flags = 0,
4642 };
4643
4644 ret = kvm_set_memory_region(kvm, &tss_mem);
4645 if (ret)
4646 return ret;
4647 kvm->arch.tss_addr = addr;
4648 if (!init_rmode_tss(kvm))
4649 return -ENOMEM;
4650
4651 return 0;
4652}
4653
4654static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
4655{
4656 switch (vec) {
4657 case BP_VECTOR:
		/*
		 * Update instruction length as we may reinject the exception
		 * from user space while in guest debugging mode.
		 */
4662 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
4663 vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4664 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
4665 return false;
		/* fall through */
4667 case DB_VECTOR:
4668 if (vcpu->guest_debug &
4669 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
4670 return false;
		/* fall through */
4672 case DE_VECTOR:
4673 case OF_VECTOR:
4674 case BR_VECTOR:
4675 case UD_VECTOR:
4676 case DF_VECTOR:
4677 case SS_VECTOR:
4678 case GP_VECTOR:
4679 case MF_VECTOR:
4680 return true;
4682 }
4683 return false;
4684}
4685
4686static int handle_rmode_exception(struct kvm_vcpu *vcpu,
4687 int vec, u32 err_code)
4688{
	/*
	 * Instructions with an address-size override prefix (opcode 0x67)
	 * cause a #SS fault with error code 0 in VM86 mode.
	 */
4693 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
4694 if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
4695 if (vcpu->arch.halt_request) {
4696 vcpu->arch.halt_request = 0;
4697 return kvm_emulate_halt(vcpu);
4698 }
4699 return 1;
4700 }
4701 return 0;
4702 }
4703
	/*
	 * Forward all other exceptions that are valid in real mode.
	 * FIXME: breaks guest debugging in real mode, needs to be fixed with
	 *	  the required debugging infrastructure rework.
	 */
4709 kvm_queue_exception(vcpu, vec);
4710 return 1;
4711}
4712
/*
 * Trigger machine check on the host. We assume all the MSRs are already set
 * up by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
4720static void kvm_machine_check(void)
4721{
4722#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
4723 struct pt_regs regs = {
4724 .cs = 3,
4725 .flags = X86_EFLAGS_IF,
4726 };
4727
	do_machine_check(&regs, 0);
4729#endif
4730}
4731
4732static int handle_machine_check(struct kvm_vcpu *vcpu)
4733{
	/* already handled by vcpu_run */
4735 return 1;
4736}
4737
4738static int handle_exception(struct kvm_vcpu *vcpu)
4739{
4740 struct vcpu_vmx *vmx = to_vmx(vcpu);
4741 struct kvm_run *kvm_run = vcpu->run;
4742 u32 intr_info, ex_no, error_code;
4743 unsigned long cr2, rip, dr6;
4744 u32 vect_info;
4745 enum emulation_result er;
4746
4747 vect_info = vmx->idt_vectoring_info;
4748 intr_info = vmx->exit_intr_info;
4749
4750 if (is_machine_check(intr_info))
4751 return handle_machine_check(vcpu);
4752
4753 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
4754 return 1;
4755
4756 if (is_no_device(intr_info)) {
4757 vmx_fpu_activate(vcpu);
4758 return 1;
4759 }
4760
4761 if (is_invalid_opcode(intr_info)) {
4762 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
4763 if (er != EMULATE_DONE)
4764 kvm_queue_exception(vcpu, UD_VECTOR);
4765 return 1;
4766 }
4767
4768 error_code = 0;
4769 if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
4770 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
4771
	/*
	 * A #PF with PFEC.RSVD = 1 indicates the guest is accessing MMIO;
	 * in that case it is better to report an internal error.
	 * See the comments in vmx_handle_exit.
	 */
4777 if ((vect_info & VECTORING_INFO_VALID_MASK) &&
4778 !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
4779 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4780 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
4781 vcpu->run->internal.ndata = 2;
4782 vcpu->run->internal.data[0] = vect_info;
4783 vcpu->run->internal.data[1] = intr_info;
4784 return 0;
4785 }
4786
4787 if (is_page_fault(intr_info)) {
		/* EPT won't cause page fault directly */
4789 BUG_ON(enable_ept);
4790 cr2 = vmcs_readl(EXIT_QUALIFICATION);
4791 trace_kvm_page_fault(cr2, error_code);
4792
4793 if (kvm_event_needs_reinjection(vcpu))
4794 kvm_mmu_unprotect_page_virt(vcpu, cr2);
4795 return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
4796 }
4797
4798 ex_no = intr_info & INTR_INFO_VECTOR_MASK;
4799
4800 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
4801 return handle_rmode_exception(vcpu, ex_no, error_code);
4802
4803 switch (ex_no) {
4804 case DB_VECTOR:
4805 dr6 = vmcs_readl(EXIT_QUALIFICATION);
4806 if (!(vcpu->guest_debug &
4807 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
4808 vcpu->arch.dr6 = dr6 | DR6_FIXED_1;
4809 kvm_queue_exception(vcpu, DB_VECTOR);
4810 return 1;
4811 }
4812 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
4813 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
		/* fall through */
4815 case BP_VECTOR:
		/*
		 * Update instruction length as we may reinject #BP from
		 * user space while in guest debugging mode. Reading it for
		 * #DB as well causes no harm, it is not used in that case.
		 */
4821 vmx->vcpu.arch.event_exit_inst_len =
4822 vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4823 kvm_run->exit_reason = KVM_EXIT_DEBUG;
4824 rip = kvm_rip_read(vcpu);
4825 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
4826 kvm_run->debug.arch.exception = ex_no;
4827 break;
4828 default:
4829 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
4830 kvm_run->ex.exception = ex_no;
4831 kvm_run->ex.error_code = error_code;
4832 break;
4833 }
4834 return 0;
4835}
4836
4837static int handle_external_interrupt(struct kvm_vcpu *vcpu)
4838{
4839 ++vcpu->stat.irq_exits;
4840 return 1;
4841}
4842
4843static int handle_triple_fault(struct kvm_vcpu *vcpu)
4844{
4845 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
4846 return 0;
4847}
4848
4849static int handle_io(struct kvm_vcpu *vcpu)
4850{
4851 unsigned long exit_qualification;
4852 int size, in, string;
4853 unsigned port;
4854
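	/*
	 * I/O-instruction exit qualification: bits 2:0 = access size - 1,
	 * bit 3 = direction (1 = IN), bit 4 = string instruction,
	 * bits 31:16 = port number.
	 */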
4855 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4856 string = (exit_qualification & 16) != 0;
4857 in = (exit_qualification & 8) != 0;
4858
4859 ++vcpu->stat.io_exits;
4860
4861 if (string || in)
4862 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
4863
4864 port = exit_qualification >> 16;
4865 size = (exit_qualification & 7) + 1;
4866 skip_emulated_instruction(vcpu);
4867
4868 return kvm_fast_pio_out(vcpu, size, port);
4869}
4870
4871static void
4872vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
4873{
	/*
	 * Patch in the VMCALL instruction (0f 01 c1).
	 */
4877 hypercall[0] = 0x0f;
4878 hypercall[1] = 0x01;
4879 hypercall[2] = 0xc1;
4880}
4881
4882
4883static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
4884{
4885 if (is_guest_mode(vcpu)) {
4886 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4887 unsigned long orig_val = val;
4888
		/*
		 * We get here when L2 changed cr0 in a way that did not change
		 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
		 * but did change L0 shadowed bits. So we first calculate the
		 * effective cr0 value that L1 would like to write into the
		 * hardware. It consists of the L2-owned bits from the new
		 * value combined with the L1-owned bits from L1's guest_cr0.
		 */
4897 val = (val & ~vmcs12->cr0_guest_host_mask) |
4898 (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
4899

		/* These CR0 bits must remain set while in VMX operation. */
4902 if ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON)
4903 return 1;
4904
4905 if (kvm_set_cr0(vcpu, val))
4906 return 1;
4907 vmcs_writel(CR0_READ_SHADOW, orig_val);
4908 return 0;
4909 } else {
4910 if (to_vmx(vcpu)->nested.vmxon &&
4911 ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
4912 return 1;
4913 return kvm_set_cr0(vcpu, val);
4914 }
4915}
4916
4917static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
4918{
4919 if (is_guest_mode(vcpu)) {
4920 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4921 unsigned long orig_val = val;

		/* As in handle_set_cr0(): merge the L1-owned bits from guest_cr4. */
4924 val = (val & ~vmcs12->cr4_guest_host_mask) |
4925 (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
4926 if (kvm_set_cr4(vcpu, val))
4927 return 1;
4928 vmcs_writel(CR4_READ_SHADOW, orig_val);
4929 return 0;
4930 } else
4931 return kvm_set_cr4(vcpu, val);
4932}
4933
/* Called to set cr0 as appropriate for a clts instruction exit. */
4935static void handle_clts(struct kvm_vcpu *vcpu)
4936{
4937 if (is_guest_mode(vcpu)) {
		/*
		 * We get here when L2 performed CLTS, and L1 didn't shadow
		 * the CR0.TS bit but L0 did. Clear TS in the read shadow and
		 * in arch.cr0, without going through vmx_set_cr0().
		 */
4943 vmcs_writel(CR0_READ_SHADOW,
4944 vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS);
4945 vcpu->arch.cr0 &= ~X86_CR0_TS;
4946 } else
4947 vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
4948}
4949
4950static int handle_cr(struct kvm_vcpu *vcpu)
4951{
4952 unsigned long exit_qualification, val;
4953 int cr;
4954 int reg;
4955 int err;
4956
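	/*
	 * CR-access exit qualification: bits 3:0 = CR number, bits 5:4 =
	 * access type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW),
	 * bits 11:8 = the GPR operand.
	 */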
4957 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4958 cr = exit_qualification & 15;
4959 reg = (exit_qualification >> 8) & 15;
4960 switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
4962 val = kvm_register_read(vcpu, reg);
4963 trace_kvm_cr_write(cr, val);
4964 switch (cr) {
4965 case 0:
4966 err = handle_set_cr0(vcpu, val);
4967 kvm_complete_insn_gp(vcpu, err);
4968 return 1;
4969 case 3:
4970 err = kvm_set_cr3(vcpu, val);
4971 kvm_complete_insn_gp(vcpu, err);
4972 return 1;
4973 case 4:
4974 err = handle_set_cr4(vcpu, val);
4975 kvm_complete_insn_gp(vcpu, err);
4976 return 1;
4977 case 8: {
4978 u8 cr8_prev = kvm_get_cr8(vcpu);
4979 u8 cr8 = kvm_register_read(vcpu, reg);
4980 err = kvm_set_cr8(vcpu, cr8);
4981 kvm_complete_insn_gp(vcpu, err);
4982 if (irqchip_in_kernel(vcpu->kvm))
4983 return 1;
4984 if (cr8_prev <= cr8)
4985 return 1;
4986 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
4987 return 0;
4988 }
4989 }
4990 break;
	case 2: /* clts */
4992 handle_clts(vcpu);
4993 trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
4994 skip_emulated_instruction(vcpu);
4995 vmx_fpu_activate(vcpu);
4996 return 1;
	case 1: /* mov from cr */
4998 switch (cr) {
4999 case 3:
5000 val = kvm_read_cr3(vcpu);
5001 kvm_register_write(vcpu, reg, val);
5002 trace_kvm_cr_read(cr, val);
5003 skip_emulated_instruction(vcpu);
5004 return 1;
5005 case 8:
5006 val = kvm_get_cr8(vcpu);
5007 kvm_register_write(vcpu, reg, val);
5008 trace_kvm_cr_read(cr, val);
5009 skip_emulated_instruction(vcpu);
5010 return 1;
5011 }
5012 break;
	case 3: /* lmsw */
5014 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5015 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
5016 kvm_lmsw(vcpu, val);
5017
5018 skip_emulated_instruction(vcpu);
5019 return 1;
5020 default:
5021 break;
5022 }
5023 vcpu->run->exit_reason = 0;
5024 vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
5025 (int)(exit_qualification >> 4) & 3, cr);
5026 return 0;
5027}
5028
5029static int handle_dr(struct kvm_vcpu *vcpu)
5030{
5031 unsigned long exit_qualification;
5032 int dr, reg;
5033
	/* Do not handle if the CPL > 0; a #GP is queued for re-entry. */
5035 if (!kvm_require_cpl(vcpu, 0))
5036 return 1;
5037 dr = vmcs_readl(GUEST_DR7);
5038 if (dr & DR7_GD) {
		/*
		 * As the vm-exit takes precedence over the debug trap, we
		 * need to emulate the latter, either for the host or the
		 * guest debugging itself.
		 */
5044 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5045 vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
5046 vcpu->run->debug.arch.dr7 = dr;
5047 vcpu->run->debug.arch.pc =
5048 vmcs_readl(GUEST_CS_BASE) +
5049 vmcs_readl(GUEST_RIP);
5050 vcpu->run->debug.arch.exception = DB_VECTOR;
5051 vcpu->run->exit_reason = KVM_EXIT_DEBUG;
5052 return 0;
5053 } else {
5054 vcpu->arch.dr7 &= ~DR7_GD;
5055 vcpu->arch.dr6 |= DR6_BD;
5056 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
5057 kvm_queue_exception(vcpu, DB_VECTOR);
5058 return 1;
5059 }
5060 }
5061
5062 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5063 dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
5064 reg = DEBUG_REG_ACCESS_REG(exit_qualification);
5065 if (exit_qualification & TYPE_MOV_FROM_DR) {
5066 unsigned long val;
5067 if (!kvm_get_dr(vcpu, dr, &val))
5068 kvm_register_write(vcpu, reg, val);
5069 } else
5070 kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
5071 skip_emulated_instruction(vcpu);
5072 return 1;
5073}
5074
5075static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
5076{
5077 vmcs_writel(GUEST_DR7, val);
5078}
5079
5080static int handle_cpuid(struct kvm_vcpu *vcpu)
5081{
5082 kvm_emulate_cpuid(vcpu);
5083 return 1;
5084}
5085
5086static int handle_rdmsr(struct kvm_vcpu *vcpu)
5087{
5088 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
5089 u64 data;
5090
5091 if (vmx_get_msr(vcpu, ecx, &data)) {
5092 trace_kvm_msr_read_ex(ecx);
5093 kvm_inject_gp(vcpu, 0);
5094 return 1;
5095 }
5096
5097 trace_kvm_msr_read(ecx, data);

	/* FIXME: handling of bits 32:63 of rax, rdx */
5100 vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
5101 vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
5102 skip_emulated_instruction(vcpu);
5103 return 1;
5104}
5105
5106static int handle_wrmsr(struct kvm_vcpu *vcpu)
5107{
5108 struct msr_data msr;
5109 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
5110 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
5111 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
5112
5113 msr.data = data;
5114 msr.index = ecx;
5115 msr.host_initiated = false;
5116 if (vmx_set_msr(vcpu, &msr) != 0) {
5117 trace_kvm_msr_write_ex(ecx, data);
5118 kvm_inject_gp(vcpu, 0);
5119 return 1;
5120 }
5121
5122 trace_kvm_msr_write(ecx, data);
5123 skip_emulated_instruction(vcpu);
5124 return 1;
5125}
5126
5127static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
5128{
5129 kvm_make_request(KVM_REQ_EVENT, vcpu);
5130 return 1;
5131}
5132
5133static int handle_interrupt_window(struct kvm_vcpu *vcpu)
5134{
5135 u32 cpu_based_vm_exec_control;
5136
	/* clear the interrupt-window-pending control */
5138 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
5139 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
5140 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
5141
5142 kvm_make_request(KVM_REQ_EVENT, vcpu);
5143
5144 ++vcpu->stat.irq_window_exits;
5145
	/*
	 * If user space is waiting to inject interrupts, exit as soon as
	 * possible.
	 */
5150 if (!irqchip_in_kernel(vcpu->kvm) &&
5151 vcpu->run->request_interrupt_window &&
5152 !kvm_cpu_has_interrupt(vcpu)) {
5153 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
5154 return 0;
5155 }
5156 return 1;
5157}
5158
5159static int handle_halt(struct kvm_vcpu *vcpu)
5160{
5161 skip_emulated_instruction(vcpu);
5162 return kvm_emulate_halt(vcpu);
5163}
5164
5165static int handle_vmcall(struct kvm_vcpu *vcpu)
5166{
5167 skip_emulated_instruction(vcpu);
5168 kvm_emulate_hypercall(vcpu);
5169 return 1;
5170}
5171
5172static int handle_invd(struct kvm_vcpu *vcpu)
5173{
5174 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
5175}
5176
5177static int handle_invlpg(struct kvm_vcpu *vcpu)
5178{
5179 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5180
5181 kvm_mmu_invlpg(vcpu, exit_qualification);
5182 skip_emulated_instruction(vcpu);
5183 return 1;
5184}
5185
5186static int handle_rdpmc(struct kvm_vcpu *vcpu)
5187{
5188 int err;
5189
5190 err = kvm_rdpmc(vcpu);
5191 kvm_complete_insn_gp(vcpu, err);
5192
5193 return 1;
5194}
5195
5196static int handle_wbinvd(struct kvm_vcpu *vcpu)
5197{
5198 skip_emulated_instruction(vcpu);
5199 kvm_emulate_wbinvd(vcpu);
5200 return 1;
5201}
5202
5203static int handle_xsetbv(struct kvm_vcpu *vcpu)
5204{
5205 u64 new_bv = kvm_read_edx_eax(vcpu);
5206 u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
5207
5208 if (kvm_set_xcr(vcpu, index, new_bv) == 0)
5209 skip_emulated_instruction(vcpu);
5210 return 1;
5211}
5212
5213static int handle_apic_access(struct kvm_vcpu *vcpu)
5214{
5215 if (likely(fasteoi)) {
5216 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5217 int access_type, offset;
5218
5219 access_type = exit_qualification & APIC_ACCESS_TYPE;
5220 offset = exit_qualification & APIC_ACCESS_OFFSET;
5221
		/*
		 * A sane guest uses MOV to write the EOI register, and the
		 * written value is not cared about, so short-circuit that
		 * common case.
		 */
5226 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
5227 (offset == APIC_EOI)) {
5228 kvm_lapic_set_eoi(vcpu);
5229 skip_emulated_instruction(vcpu);
5230 return 1;
5231 }
5232 }
5233 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
5234}
5235
5236static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
5237{
5238 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5239 int vector = exit_qualification & 0xff;
5240
	/* EOI-induced VM exit is trap-like; no need to adjust RIP */
5242 kvm_apic_set_eoi_accelerated(vcpu, vector);
5243 return 1;
5244}
5245
5246static int handle_apic_write(struct kvm_vcpu *vcpu)
5247{
5248 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5249 u32 offset = exit_qualification & 0xfff;
5250
	/* APIC-write VM exit is trap-like; no need to adjust RIP */
5252 kvm_apic_write_nodecode(vcpu, offset);
5253 return 1;
5254}
5255
5256static int handle_task_switch(struct kvm_vcpu *vcpu)
5257{
5258 struct vcpu_vmx *vmx = to_vmx(vcpu);
5259 unsigned long exit_qualification;
5260 bool has_error_code = false;
5261 u32 error_code = 0;
5262 u16 tss_selector;
5263 int reason, type, idt_v, idt_index;
5264
5265 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
5266 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
5267 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
5268
5269 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5270
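	/*
	 * Bits 31:30 of the exit qualification encode the task-switch
	 * source; the low 16 bits hold the new TSS selector.
	 */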
5271 reason = (u32)exit_qualification >> 30;
5272 if (reason == TASK_SWITCH_GATE && idt_v) {
5273 switch (type) {
5274 case INTR_TYPE_NMI_INTR:
5275 vcpu->arch.nmi_injected = false;
5276 vmx_set_nmi_mask(vcpu, true);
5277 break;
5278 case INTR_TYPE_EXT_INTR:
5279 case INTR_TYPE_SOFT_INTR:
5280 kvm_clear_interrupt_queue(vcpu);
5281 break;
5282 case INTR_TYPE_HARD_EXCEPTION:
5283 if (vmx->idt_vectoring_info &
5284 VECTORING_INFO_DELIVER_CODE_MASK) {
5285 has_error_code = true;
5286 error_code =
5287 vmcs_read32(IDT_VECTORING_ERROR_CODE);
5288 }
			/* fall through */
5290 case INTR_TYPE_SOFT_EXCEPTION:
5291 kvm_clear_exception_queue(vcpu);
5292 break;
5293 default:
5294 break;
5295 }
5296 }
5297 tss_selector = exit_qualification;
5298
5299 if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
5300 type != INTR_TYPE_EXT_INTR &&
5301 type != INTR_TYPE_NMI_INTR))
5302 skip_emulated_instruction(vcpu);
5303
5304 if (kvm_task_switch(vcpu, tss_selector,
5305 type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason,
5306 has_error_code, error_code) == EMULATE_FAIL) {
5307 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5308 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
5309 vcpu->run->internal.ndata = 0;
5310 return 0;
5311 }
5312
	/* clear all local breakpoint enable flags */
5314 vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55);
5315
	/*
	 * TODO: What about debug traps on tss switch?
	 *	 Are we supposed to inject them and update dr6?
	 */
5320
5321 return 1;
5322}
5323
5324static int handle_ept_violation(struct kvm_vcpu *vcpu)
5325{
5326 unsigned long exit_qualification;
5327 gpa_t gpa;
5328 u32 error_code;
5329 int gla_validity;
5330
5331 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5332
5333 gla_validity = (exit_qualification >> 7) & 0x3;
5334 if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
5335 printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
5336 printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
5337 (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
5338 vmcs_readl(GUEST_LINEAR_ADDRESS));
5339 printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
5340 (long unsigned int)exit_qualification);
5341 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
5342 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
5343 return 0;
5344 }
5345
	/*
	 * If the EPT violation happened while executing iret from NMI,
	 * the "blocked by NMI" bit has to be set before the next VM entry.
	 * There are errata that may cause this bit to not be set:
	 * AAK134, BY25.
	 */
5352 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5353 cpu_has_virtual_nmis() &&
5354 (exit_qualification & INTR_INFO_UNBLOCK_NMI))
5355 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
5356
5357 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5358 trace_kvm_page_fault(gpa, exit_qualification);
5359
	/* Is it a write fault? */
5361 error_code = exit_qualification & (1U << 1);
	/* Is it a fetch fault? */
5363 error_code |= (exit_qualification & (1U << 2)) << 2;
	/* Is the EPT entry for the address present? */
5365 error_code |= (exit_qualification >> 3) & 0x1;
5366
5367 vcpu->arch.exit_qualification = exit_qualification;
5368
5369 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
5370}
5371
5372static u64 ept_rsvd_mask(u64 spte, int level)
5373{
5374 int i;
5375 u64 mask = 0;
5376
5377 for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
5378 mask |= (1ULL << i);
5379
5380 if (level > 2)
		/* bits 7:3 reserved */
5382 mask |= 0xf8;
5383 else if (level == 2) {
5384 if (spte & (1ULL << 7))
			/* 2MB page: bits 20:12 reserved */
5386 mask |= 0x1ff000;
5387 else
			/* bits 6:3 reserved */
5389 mask |= 0x78;
5390 }
5391
5392 return mask;
5393}
5394
5395static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
5396 int level)
5397{
5398 printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);
5399
	/* 010b (write-only) */
5401 WARN_ON((spte & 0x7) == 0x2);
5402
	/* 110b (write/execute) */
5404 WARN_ON((spte & 0x7) == 0x6);
5405
	/* 100b (execute-only), only valid if the CPU supports it */
5407 if (!cpu_has_vmx_ept_execute_only())
5408 WARN_ON((spte & 0x7) == 0x4);
5409
	/* not 000b, i.e. the entry is present */
5411 if ((spte & 0x7)) {
5412 u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);
5413
5414 if (rsvd_bits != 0) {
5415 printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
5416 __func__, rsvd_bits);
5417 WARN_ON(1);
5418 }
5419
5420 if (level == 1 || (level == 2 && (spte & (1ULL << 7)))) {
5421 u64 ept_mem_type = (spte & 0x38) >> 3;
5422
5423 if (ept_mem_type == 2 || ept_mem_type == 3 ||
5424 ept_mem_type == 7) {
5425 printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
5426 __func__, ept_mem_type);
5427 WARN_ON(1);
5428 }
5429 }
5430 }
5431}
5432
5433static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
5434{
5435 u64 sptes[4];
5436 int nr_sptes, i, ret;
5437 gpa_t gpa;
5438
5439 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5440
5441 ret = handle_mmio_page_fault_common(vcpu, gpa, true);
5442 if (likely(ret == RET_MMIO_PF_EMULATE))
5443 return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
5444 EMULATE_DONE;
5445
5446 if (unlikely(ret == RET_MMIO_PF_INVALID))
5447 return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0);
5448
5449 if (unlikely(ret == RET_MMIO_PF_RETRY))
5450 return 1;
5451
	/* This is a real EPT misconfiguration */
5453 printk(KERN_ERR "EPT: Misconfiguration.\n");
5454 printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
5455
5456 nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
5457
5458 for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
5459 ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
5460
5461 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
5462 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
5463
5464 return 0;
5465}
5466
5467static int handle_nmi_window(struct kvm_vcpu *vcpu)
5468{
5469 u32 cpu_based_vm_exec_control;
5470
	/* clear the NMI-window-pending control */
5472 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
5473 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
5474 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
5475 ++vcpu->stat.nmi_window_exits;
5476 kvm_make_request(KVM_REQ_EVENT, vcpu);
5477
5478 return 1;
5479}
5480
5481static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
5482{
5483 struct vcpu_vmx *vmx = to_vmx(vcpu);
5484 enum emulation_result err = EMULATE_DONE;
5485 int ret = 1;
5486 u32 cpu_exec_ctrl;
5487 bool intr_window_requested;
5488 unsigned count = 130;
5489
5490 cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
5491 intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
5492
5493 while (!guest_state_valid(vcpu) && count-- != 0) {
5494 if (intr_window_requested && vmx_interrupt_allowed(vcpu))
5495 return handle_interrupt_window(&vmx->vcpu);
5496
5497 if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
5498 return 1;
5499
5500 err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
5501
5502 if (err == EMULATE_USER_EXIT) {
5503 ++vcpu->stat.mmio_exits;
5504 ret = 0;
5505 goto out;
5506 }
5507
5508 if (err != EMULATE_DONE) {
5509 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5510 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
5511 vcpu->run->internal.ndata = 0;
5512 return 0;
5513 }
5514
5515 if (vcpu->arch.halt_request) {
5516 vcpu->arch.halt_request = 0;
5517 ret = kvm_emulate_halt(vcpu);
5518 goto out;
5519 }
5520
5521 if (signal_pending(current))
5522 goto out;
5523 if (need_resched())
5524 schedule();
5525 }
5526
5527 vmx->emulation_required = emulation_required(vcpu);
5528out:
5529 return ret;
5530}
5531
/*
 * Indicate that a vcpu is busy-waiting in a spinlock. We do not enable plain
 * PAUSE exiting, so we only get here on CPUs with PAUSE-loop exiting.
 */
5536static int handle_pause(struct kvm_vcpu *vcpu)
5537{
5538 skip_emulated_instruction(vcpu);
5539 kvm_vcpu_on_spin(vcpu);
5540
5541 return 1;
5542}
5543
5544static int handle_invalid_op(struct kvm_vcpu *vcpu)
5545{
5546 kvm_queue_exception(vcpu, UD_VECTOR);
5547 return 1;
5548}
5549
/*
 * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12.
 * We could allocate one vmcs02 per vmcs12, but that would let a buggy or
 * malicious L1 tie up unbounded host memory. Instead, keep a small pool
 * (VMCS02_POOL_SIZE entries) in rough LRU order: return the vmcs02 already
 * associated with the current vmcs12 if there is one, recycle the least
 * recently used entry when the pool is full, and allocate a new entry
 * otherwise.
 */
5564static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
5565{
5566 struct vmcs02_list *item;
5567 list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
5568 if (item->vmptr == vmx->nested.current_vmptr) {
5569 list_move(&item->list, &vmx->nested.vmcs02_pool);
5570 return &item->vmcs02;
5571 }
5572
5573 if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
		/* Recycle the least recently used VMCS. */
5575 item = list_entry(vmx->nested.vmcs02_pool.prev,
5576 struct vmcs02_list, list);
5577 item->vmptr = vmx->nested.current_vmptr;
5578 list_move(&item->list, &vmx->nested.vmcs02_pool);
5579 return &item->vmcs02;
5580 }
5581
	/* Create a new VMCS entry for this vmcs12. */
5583 item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
5584 if (!item)
5585 return NULL;
5586 item->vmcs02.vmcs = alloc_vmcs();
5587 if (!item->vmcs02.vmcs) {
5588 kfree(item);
5589 return NULL;
5590 }
5591 loaded_vmcs_init(&item->vmcs02);
5592 item->vmptr = vmx->nested.current_vmptr;
5593 list_add(&(item->list), &(vmx->nested.vmcs02_pool));
5594 vmx->nested.vmcs02_num++;
5595 return &item->vmcs02;
5596}
5597
/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
5599static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
5600{
5601 struct vmcs02_list *item;
5602 list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
5603 if (item->vmptr == vmptr) {
5604 free_loaded_vmcs(&item->vmcs02);
5605 list_del(&item->list);
5606 kfree(item);
5607 vmx->nested.vmcs02_num--;
5608 return;
5609 }
5610}
5611
/*
 * Free all VMCSs saved for this vcpu, except the one pointed to by
 * vmx->loaded_vmcs. These include the VMCSs in vmcs02_pool (except the one
 * currently used, if running L2), and vmcs01 when running L2.
 */
5617static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
5618{
5619 struct vmcs02_list *item, *n;
5620 list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
5621 if (vmx->loaded_vmcs != &item->vmcs02)
5622 free_loaded_vmcs(&item->vmcs02);
5623 list_del(&item->list);
5624 kfree(item);
5625 }
5626 vmx->nested.vmcs02_num = 0;
5627
5628 if (vmx->loaded_vmcs != &vmx->vmcs01)
5629 free_loaded_vmcs(&vmx->vmcs01);
5630}
5631
/*
 * The following three functions, nested_vmx_succeed()/failValid()/
 * failInvalid(), set the success or error code of an emulated VMX
 * instruction, as specified by the VMX instruction reference's
 * "Conventions" section.
 */
5637static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
5638{
5639 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
5640 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
5641 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
5642}
5643
5644static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
5645{
5646 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
5647 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
5648 X86_EFLAGS_SF | X86_EFLAGS_OF))
5649 | X86_EFLAGS_CF);
5650}
5651
5652static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
5653 u32 vm_instruction_error)
5654{
5655 if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
		/*
		 * failValid writes the error number to the current VMCS,
		 * which can't be done if there isn't a current VMCS.
		 */
5660 nested_vmx_failInvalid(vcpu);
5661 return;
5662 }
5663 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
5664 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
5665 X86_EFLAGS_SF | X86_EFLAGS_OF))
5666 | X86_EFLAGS_ZF);
5667 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force a shadow sync because
	 * VM_INSTRUCTION_ERROR is not shadowed.
	 */
5672}
5673
/*
 * Emulate the VMXON instruction.
 * Currently, we just remember that VMX is active, and do not save or even
 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
 * do not currently need to store anything in that guest-allocated memory
 * region.
 */
5682static int handle_vmon(struct kvm_vcpu *vcpu)
5683{
5684 struct kvm_segment cs;
5685 struct vcpu_vmx *vmx = to_vmx(vcpu);
5686 struct vmcs *shadow_vmcs;
5687 const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
5688 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
5689
	/*
	 * The Intel VMX Instruction Reference lists a bunch of bits that
	 * are prerequisite to running VMXON, most notably cr4.VMXE must be
	 * set to 1. Otherwise, we should fail with #UD. Test these now:
	 */
5695 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
5696 !kvm_read_cr0_bits(vcpu, X86_CR0_PE) ||
5697 (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
5698 kvm_queue_exception(vcpu, UD_VECTOR);
5699 return 1;
5700 }
5701
5702 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
5703 if (is_long_mode(vcpu) && !cs.l) {
5704 kvm_queue_exception(vcpu, UD_VECTOR);
5705 return 1;
5706 }
5707
5708 if (vmx_get_cpl(vcpu)) {
5709 kvm_inject_gp(vcpu, 0);
5710 return 1;
5711 }
5712 if (vmx->nested.vmxon) {
5713 nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
5714 skip_emulated_instruction(vcpu);
5715 return 1;
5716 }
5717
5718 if ((vmx->nested.msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
5719 != VMXON_NEEDED_FEATURES) {
5720 kvm_inject_gp(vcpu, 0);
5721 return 1;
5722 }
5723
5724 if (enable_shadow_vmcs) {
5725 shadow_vmcs = alloc_vmcs();
5726 if (!shadow_vmcs)
5727 return -ENOMEM;
		/* mark vmcs as shadow */
5729 shadow_vmcs->revision_id |= (1u << 31);
		/* init shadow vmcs */
5731 vmcs_clear(shadow_vmcs);
5732 vmx->nested.current_shadow_vmcs = shadow_vmcs;
5733 }
5734
5735 INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
5736 vmx->nested.vmcs02_num = 0;
5737
5738 vmx->nested.vmxon = true;
5739
5740 skip_emulated_instruction(vcpu);
5741 nested_vmx_succeed(vcpu);
5742 return 1;
5743}
5744
/*
 * Intel's VMX Instruction Reference specifies a common set of prerequisites
 * for running VMX instructions (except VMXON, whose prerequisites are
 * slightly different). It also specifies what exception to inject otherwise.
 */
5750static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
5751{
5752 struct kvm_segment cs;
5753 struct vcpu_vmx *vmx = to_vmx(vcpu);
5754
5755 if (!vmx->nested.vmxon) {
5756 kvm_queue_exception(vcpu, UD_VECTOR);
5757 return 0;
5758 }
5759
5760 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
5761 if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
5762 (is_long_mode(vcpu) && !cs.l)) {
5763 kvm_queue_exception(vcpu, UD_VECTOR);
5764 return 0;
5765 }
5766
5767 if (vmx_get_cpl(vcpu)) {
5768 kvm_inject_gp(vcpu, 0);
5769 return 0;
5770 }
5771
5772 return 1;
5773}
5774
5775static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
5776{
5777 u32 exec_control;
5778 if (enable_shadow_vmcs) {
5779 if (vmx->nested.current_vmcs12 != NULL) {
			/* copy to memory all shadowed fields in case
			   they were modified */
5782 copy_shadow_to_vmcs12(vmx);
5783 vmx->nested.sync_shadow_vmcs = false;
5784 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
5785 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
5786 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
5787 vmcs_write64(VMCS_LINK_POINTER, -1ull);
5788 }
5789 }
5790 kunmap(vmx->nested.current_vmcs12_page);
5791 nested_release_page(vmx->nested.current_vmcs12_page);
5792}
5793
/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
5798static void free_nested(struct vcpu_vmx *vmx)
5799{
5800 if (!vmx->nested.vmxon)
5801 return;
5802 vmx->nested.vmxon = false;
5803 if (vmx->nested.current_vmptr != -1ull) {
5804 nested_release_vmcs12(vmx);
5805 vmx->nested.current_vmptr = -1ull;
5806 vmx->nested.current_vmcs12 = NULL;
5807 }
5808 if (enable_shadow_vmcs)
5809 free_vmcs(vmx->nested.current_shadow_vmcs);
5810
5811 if (vmx->nested.apic_access_page) {
5812 nested_release_page(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
5814 }
5815
5816 nested_free_all_saved_vmcss(vmx);
5817}
5818
/* Emulate the VMXOFF instruction */
5820static int handle_vmoff(struct kvm_vcpu *vcpu)
5821{
5822 if (!nested_vmx_check_permission(vcpu))
5823 return 1;
5824 free_nested(to_vmx(vcpu));
5825 skip_emulated_instruction(vcpu);
5826 nested_vmx_succeed(vcpu);
5827 return 1;
5828}
5829
/*
 * Decode the memory-address operand of a vmx instruction, as recorded on an
 * exit caused by such an instruction (run by a guest hypervisor).
 * On success, returns 0. When the operand is invalid, returns 1 and an
 * exception is queued.
 */
5836static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
5837 unsigned long exit_qualification,
5838 u32 vmx_instruction_info, gva_t *ret)
5839{
	/*
	 * The addressing components of the operand are encoded in
	 * vmx_instruction_info; only the displacement part is delivered in
	 * exit_qualification (per the SDM's VM-exit instruction-information
	 * field):
	 *   bits  1:0  scaling (0 = none, 1 = *2, 2 = *4, 3 = *8)
	 *   bits  9:7  address size (0 = 16-bit, 1 = 32-bit, 2 = 64-bit)
	 *   bit    10  set for a register operand, clear for memory
	 *   bits 17:15 segment register
	 *   bits 21:18 index register (invalid if bit 22 is set)
	 *   bits 26:23 base register (invalid if bit 27 is set)
	 */
5848 int scaling = vmx_instruction_info & 3;
5849 int addr_size = (vmx_instruction_info >> 7) & 7;
5850 bool is_reg = vmx_instruction_info & (1u << 10);
5851 int seg_reg = (vmx_instruction_info >> 15) & 7;
5852 int index_reg = (vmx_instruction_info >> 18) & 0xf;
5853 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
5854 int base_reg = (vmx_instruction_info >> 23) & 0xf;
5855 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
5856
5857 if (is_reg) {
5858 kvm_queue_exception(vcpu, UD_VECTOR);
5859 return 1;
5860 }
5861
	/* Addr = segment_base + offset */
	/* offset = base + [index * scale] + displacement */
5864 *ret = vmx_get_segment_base(vcpu, seg_reg);
5865 if (base_is_valid)
5866 *ret += kvm_register_read(vcpu, base_reg);
5867 if (index_is_valid)
5868 *ret += kvm_register_read(vcpu, index_reg)<<scaling;
5869 *ret += exit_qualification;
5870
5871 if (addr_size == 1)
5872 *ret &= 0xffffffff;
5873
	/*
	 * TODO: throw #GP (and return 1) in various cases that the VM*
	 * instructions require it - e.g., offset beyond segment limit,
	 * unusable or non-readable/non-writable segment register, etc.
	 */
5880 return 0;
5881}
5882
/* Emulate the VMCLEAR instruction */
5884static int handle_vmclear(struct kvm_vcpu *vcpu)
5885{
5886 struct vcpu_vmx *vmx = to_vmx(vcpu);
5887 gva_t gva;
5888 gpa_t vmptr;
5889 struct vmcs12 *vmcs12;
5890 struct page *page;
5891 struct x86_exception e;
5892
5893 if (!nested_vmx_check_permission(vcpu))
5894 return 1;
5895
5896 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
5897 vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
5898 return 1;
5899
5900 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
5901 sizeof(vmptr), &e)) {
5902 kvm_inject_page_fault(vcpu, &e);
5903 return 1;
5904 }
5905
5906 if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
5907 nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
5908 skip_emulated_instruction(vcpu);
5909 return 1;
5910 }
5911
5912 if (vmptr == vmx->nested.current_vmptr) {
5913 nested_release_vmcs12(vmx);
5914 vmx->nested.current_vmptr = -1ull;
5915 vmx->nested.current_vmcs12 = NULL;
5916 }
5917
5918 page = nested_get_page(vcpu, vmptr);
5919 if (page == NULL) {
		/*
		 * For accurate processor emulation, VMCLEAR beyond available
		 * physical memory should do nothing at all. However, it is
		 * possible that a nested vmx bug, not a guest hypervisor bug,
		 * resulted in this case, so let's shut down before doing any
		 * more damage:
		 */
5927 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5928 return 1;
5929 }
5930 vmcs12 = kmap(page);
5931 vmcs12->launch_state = 0;
5932 kunmap(page);
5933 nested_release_page(page);
5934
5935 nested_free_vmcs02(vmx, vmptr);
5936
5937 skip_emulated_instruction(vcpu);
5938 nested_vmx_succeed(vcpu);
5939 return 1;
5940}
5941
5942static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
5943
/* Emulate the VMLAUNCH instruction */
5945static int handle_vmlaunch(struct kvm_vcpu *vcpu)
5946{
5947 return nested_vmx_run(vcpu, true);
5948}
5949
/* Emulate the VMRESUME instruction */
5951static int handle_vmresume(struct kvm_vcpu *vcpu)
5952{
5953
5954 return nested_vmx_run(vcpu, false);
5955}
5956
5957enum vmcs_field_type {
5958 VMCS_FIELD_TYPE_U16 = 0,
5959 VMCS_FIELD_TYPE_U64 = 1,
5960 VMCS_FIELD_TYPE_U32 = 2,
5961 VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
5962};
5963
5964static inline int vmcs_field_type(unsigned long field)
5965{
	if (0x1 & field)	/* the *_HIGH fields are all 32 bit */
		return VMCS_FIELD_TYPE_U32;
	return (field >> 13) & 0x3;
5969}
5970
5971static inline int vmcs_field_readonly(unsigned long field)
5972{
5973 return (((field >> 10) & 0x3) == 1);
5974}
5975
/*
 * Read a vmcs12 field. Since these fields have varying lengths and one
 * return slot is used, the value is zero-extended into the u64 *ret.
 * Returns 1 on success, 0 if the field is unsupported.
 */
5983static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
5984 unsigned long field, u64 *ret)
5985{
5986 short offset = vmcs_field_to_offset(field);
5987 char *p;
5988
5989 if (offset < 0)
5990 return 0;
5991
5992 p = ((char *)(get_vmcs12(vcpu))) + offset;
5993
5994 switch (vmcs_field_type(field)) {
5995 case VMCS_FIELD_TYPE_NATURAL_WIDTH:
5996 *ret = *((natural_width *)p);
5997 return 1;
5998 case VMCS_FIELD_TYPE_U16:
5999 *ret = *((u16 *)p);
6000 return 1;
6001 case VMCS_FIELD_TYPE_U32:
6002 *ret = *((u32 *)p);
6003 return 1;
6004 case VMCS_FIELD_TYPE_U64:
6005 *ret = *((u64 *)p);
6006 return 1;
6007 default:
6008 return 0;
6009 }
6010}
6011
6012
static inline bool vmcs12_write_any(struct kvm_vcpu *vcpu,
				    unsigned long field, u64 field_value)
{
	short offset = vmcs_field_to_offset(field);
	char *p;

	if (offset < 0)
		return false;
	p = ((char *)get_vmcs12(vcpu)) + offset;
6019
6020 switch (vmcs_field_type(field)) {
6021 case VMCS_FIELD_TYPE_U16:
6022 *(u16 *)p = field_value;
6023 return true;
6024 case VMCS_FIELD_TYPE_U32:
6025 *(u32 *)p = field_value;
6026 return true;
6027 case VMCS_FIELD_TYPE_U64:
6028 *(u64 *)p = field_value;
6029 return true;
6030 case VMCS_FIELD_TYPE_NATURAL_WIDTH:
6031 *(natural_width *)p = field_value;
6032 return true;
6033 default:
6034 return false;
6035 }
6037}
6038
6039static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
6040{
6041 int i;
6042 unsigned long field;
6043 u64 field_value;
6044 struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
6045 const unsigned long *fields = shadow_read_write_fields;
6046 const int num_fields = max_shadow_read_write_fields;
6047
6048 vmcs_load(shadow_vmcs);
6049
6050 for (i = 0; i < num_fields; i++) {
6051 field = fields[i];
6052 switch (vmcs_field_type(field)) {
6053 case VMCS_FIELD_TYPE_U16:
6054 field_value = vmcs_read16(field);
6055 break;
6056 case VMCS_FIELD_TYPE_U32:
6057 field_value = vmcs_read32(field);
6058 break;
6059 case VMCS_FIELD_TYPE_U64:
6060 field_value = vmcs_read64(field);
6061 break;
6062 case VMCS_FIELD_TYPE_NATURAL_WIDTH:
6063 field_value = vmcs_readl(field);
6064 break;
6065 }
6066 vmcs12_write_any(&vmx->vcpu, field, field_value);
6067 }
6068
6069 vmcs_clear(shadow_vmcs);
6070 vmcs_load(vmx->loaded_vmcs->vmcs);
6071}
6072
6073static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
6074{
6075 const unsigned long *fields[] = {
6076 shadow_read_write_fields,
6077 shadow_read_only_fields
6078 };
6079 const int max_fields[] = {
6080 max_shadow_read_write_fields,
6081 max_shadow_read_only_fields
6082 };
6083 int i, q;
6084 unsigned long field;
6085 u64 field_value = 0;
6086 struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
6087
6088 vmcs_load(shadow_vmcs);
6089
6090 for (q = 0; q < ARRAY_SIZE(fields); q++) {
6091 for (i = 0; i < max_fields[q]; i++) {
6092 field = fields[q][i];
6093 vmcs12_read_any(&vmx->vcpu, field, &field_value);
6094
6095 switch (vmcs_field_type(field)) {
6096 case VMCS_FIELD_TYPE_U16:
6097 vmcs_write16(field, (u16)field_value);
6098 break;
6099 case VMCS_FIELD_TYPE_U32:
6100 vmcs_write32(field, (u32)field_value);
6101 break;
6102 case VMCS_FIELD_TYPE_U64:
6103 vmcs_write64(field, (u64)field_value);
6104 break;
6105 case VMCS_FIELD_TYPE_NATURAL_WIDTH:
6106 vmcs_writel(field, (long)field_value);
6107 break;
6108 }
6109 }
6110 }
6111
6112 vmcs_clear(shadow_vmcs);
6113 vmcs_load(vmx->loaded_vmcs->vmcs);
6114}
6115
/*
 * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
 * used before) all generate the same failure when it is missing.
 */
6120static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
6121{
6122 struct vcpu_vmx *vmx = to_vmx(vcpu);
6123 if (vmx->nested.current_vmptr == -1ull) {
6124 nested_vmx_failInvalid(vcpu);
6125 skip_emulated_instruction(vcpu);
6126 return 0;
6127 }
6128 return 1;
6129}
6130
6131static int handle_vmread(struct kvm_vcpu *vcpu)
6132{
6133 unsigned long field;
6134 u64 field_value;
6135 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6136 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
6137 gva_t gva = 0;
6138
6139 if (!nested_vmx_check_permission(vcpu) ||
6140 !nested_vmx_check_vmcs12(vcpu))
6141 return 1;
6142
	/* Decode instruction info and find the field to read */
6144 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
	/* Read the field, zero-extended to a u64 field_value */
6146 if (!vmcs12_read_any(vcpu, field, &field_value)) {
6147 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
6148 skip_emulated_instruction(vcpu);
6149 return 1;
6150 }
6151
	/*
	 * Now copy part of this value to register or guest memory, as
	 * requested. Note that the number of bits actually copied is 32
	 * or 64 depending on the guest's mode (not on the field's length).
	 */
6156 if (vmx_instruction_info & (1u << 10)) {
6157 kvm_register_write(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
6158 field_value);
6159 } else {
6160 if (get_vmx_mem_address(vcpu, exit_qualification,
6161 vmx_instruction_info, &gva))
6162 return 1;
		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
6164 kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
6165 &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
6166 }
6167
6168 nested_vmx_succeed(vcpu);
6169 skip_emulated_instruction(vcpu);
6170 return 1;
6171}
6172
6173
6174static int handle_vmwrite(struct kvm_vcpu *vcpu)
6175{
6176 unsigned long field;
6177 gva_t gva;
6178 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6179 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	/*
	 * The value to write might be 32 or 64 bits, depending on L1's long
	 * mode, and eventually we need to write that into a field of several
	 * possible lengths. The code below first zero-extends the value to
	 * 64 bits (field_value), and then copies only the appropriate number
	 * of bits into the vmcs12 field.
	 */
6186 u64 field_value = 0;
6187 struct x86_exception e;
6188
6189 if (!nested_vmx_check_permission(vcpu) ||
6190 !nested_vmx_check_vmcs12(vcpu))
6191 return 1;
6192
6193 if (vmx_instruction_info & (1u << 10))
6194 field_value = kvm_register_read(vcpu,
6195 (((vmx_instruction_info) >> 3) & 0xf));
6196 else {
6197 if (get_vmx_mem_address(vcpu, exit_qualification,
6198 vmx_instruction_info, &gva))
6199 return 1;
6200 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
6201 &field_value, (is_long_mode(vcpu) ? 8 : 4), &e)) {
6202 kvm_inject_page_fault(vcpu, &e);
6203 return 1;
6204 }
6205 }
6206
	/* Decode instruction info and find the field to write */
6208 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
6209 if (vmcs_field_readonly(field)) {
6210 nested_vmx_failValid(vcpu,
6211 VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
6212 skip_emulated_instruction(vcpu);
6213 return 1;
6214 }
6215
6216 if (!vmcs12_write_any(vcpu, field, field_value)) {
6217 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
6218 skip_emulated_instruction(vcpu);
6219 return 1;
6220 }
6221
6222 nested_vmx_succeed(vcpu);
6223 skip_emulated_instruction(vcpu);
6224 return 1;
6225}
6226
/* Emulate the VMPTRLD instruction */
6228static int handle_vmptrld(struct kvm_vcpu *vcpu)
6229{
6230 struct vcpu_vmx *vmx = to_vmx(vcpu);
6231 gva_t gva;
6232 gpa_t vmptr;
6233 struct x86_exception e;
6234 u32 exec_control;
6235
6236 if (!nested_vmx_check_permission(vcpu))
6237 return 1;
6238
6239 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
6240 vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
6241 return 1;
6242
6243 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
6244 sizeof(vmptr), &e)) {
6245 kvm_inject_page_fault(vcpu, &e);
6246 return 1;
6247 }
6248
6249 if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
6250 nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
6251 skip_emulated_instruction(vcpu);
6252 return 1;
6253 }
6254
6255 if (vmx->nested.current_vmptr != vmptr) {
6256 struct vmcs12 *new_vmcs12;
6257 struct page *page;
6258 page = nested_get_page(vcpu, vmptr);
6259 if (page == NULL) {
6260 nested_vmx_failInvalid(vcpu);
6261 skip_emulated_instruction(vcpu);
6262 return 1;
6263 }
6264 new_vmcs12 = kmap(page);
6265 if (new_vmcs12->revision_id != VMCS12_REVISION) {
6266 kunmap(page);
6267 nested_release_page_clean(page);
6268 nested_vmx_failValid(vcpu,
6269 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
6270 skip_emulated_instruction(vcpu);
6271 return 1;
6272 }
6273 if (vmx->nested.current_vmptr != -1ull)
6274 nested_release_vmcs12(vmx);
6275
6276 vmx->nested.current_vmptr = vmptr;
6277 vmx->nested.current_vmcs12 = new_vmcs12;
6278 vmx->nested.current_vmcs12_page = page;
6279 if (enable_shadow_vmcs) {
6280 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
6281 exec_control |= SECONDARY_EXEC_SHADOW_VMCS;
6282 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
6283 vmcs_write64(VMCS_LINK_POINTER,
6284 __pa(vmx->nested.current_shadow_vmcs));
6285 vmx->nested.sync_shadow_vmcs = true;
6286 }
6287 }
6288
6289 nested_vmx_succeed(vcpu);
6290 skip_emulated_instruction(vcpu);
6291 return 1;
6292}
6293
/* Emulate the VMPTRST instruction */
6295static int handle_vmptrst(struct kvm_vcpu *vcpu)
6296{
6297 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6298 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
6299 gva_t vmcs_gva;
6300 struct x86_exception e;
6301
6302 if (!nested_vmx_check_permission(vcpu))
6303 return 1;
6304
6305 if (get_vmx_mem_address(vcpu, exit_qualification,
6306 vmx_instruction_info, &vmcs_gva))
6307 return 1;

	/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
6309 if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
6310 (void *)&to_vmx(vcpu)->nested.current_vmptr,
6311 sizeof(u64), &e)) {
6312 kvm_inject_page_fault(vcpu, &e);
6313 return 1;
6314 }
6315 nested_vmx_succeed(vcpu);
6316 skip_emulated_instruction(vcpu);
6317 return 1;
6318}
6319
/* Emulate the INVEPT instruction */
6321static int handle_invept(struct kvm_vcpu *vcpu)
6322{
6323 u32 vmx_instruction_info, types;
6324 unsigned long type;
6325 gva_t gva;
6326 struct x86_exception e;
6327 struct {
6328 u64 eptp, gpa;
6329 } operand;
6330 u64 eptp_mask = ((1ull << 51) - 1) & PAGE_MASK;
6331
6332 if (!(nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) ||
6333 !(nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) {
6334 kvm_queue_exception(vcpu, UD_VECTOR);
6335 return 1;
6336 }
6337
6338 if (!nested_vmx_check_permission(vcpu))
6339 return 1;
6340
6341 if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
6342 kvm_queue_exception(vcpu, UD_VECTOR);
6343 return 1;
6344 }
6345
6346 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
6347 type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
6348
6349 types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
6350
6351 if (!(types & (1UL << type))) {
6352 nested_vmx_failValid(vcpu,
6353 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
6354 return 1;
6355 }
6356
	/*
	 * According to the Intel VMX instruction reference, the memory
	 * operand is read even if it isn't needed (e.g., for type == global).
	 */
6360 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
6361 vmx_instruction_info, &gva))
6362 return 1;
6363 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
6364 sizeof(operand), &e)) {
6365 kvm_inject_page_fault(vcpu, &e);
6366 return 1;
6367 }
6368
6369 switch (type) {
6370 case VMX_EPT_EXTENT_CONTEXT:
6371 if ((operand.eptp & eptp_mask) !=
6372 (nested_ept_get_cr3(vcpu) & eptp_mask))
6373 break;
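		/* fall through: a matching single-context invalidation is
		 * handled as a global one */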
6374 case VMX_EPT_EXTENT_GLOBAL:
6375 kvm_mmu_sync_roots(vcpu);
6376 kvm_mmu_flush_tlb(vcpu);
6377 nested_vmx_succeed(vcpu);
6378 break;
6379 default:
6380 BUG_ON(1);
6381 break;
6382 }
6383
6384 skip_emulated_instruction(vcpu);
6385 return 1;
6386}
6387
/*
 * The exit handlers return 1 if the exit was handled fully and guest
 * execution may resume. Otherwise they set the kvm_run parameter to indicate
 * what needs to be done to userspace and return 0.
 */
6393static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
6394 [EXIT_REASON_EXCEPTION_NMI] = handle_exception,
6395 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
6396 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
6397 [EXIT_REASON_NMI_WINDOW] = handle_nmi_window,
6398 [EXIT_REASON_IO_INSTRUCTION] = handle_io,
6399 [EXIT_REASON_CR_ACCESS] = handle_cr,
6400 [EXIT_REASON_DR_ACCESS] = handle_dr,
6401 [EXIT_REASON_CPUID] = handle_cpuid,
6402 [EXIT_REASON_MSR_READ] = handle_rdmsr,
6403 [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
6404 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
6405 [EXIT_REASON_HLT] = handle_halt,
6406 [EXIT_REASON_INVD] = handle_invd,
6407 [EXIT_REASON_INVLPG] = handle_invlpg,
6408 [EXIT_REASON_RDPMC] = handle_rdpmc,
6409 [EXIT_REASON_VMCALL] = handle_vmcall,
6410 [EXIT_REASON_VMCLEAR] = handle_vmclear,
6411 [EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
6412 [EXIT_REASON_VMPTRLD] = handle_vmptrld,
6413 [EXIT_REASON_VMPTRST] = handle_vmptrst,
6414 [EXIT_REASON_VMREAD] = handle_vmread,
6415 [EXIT_REASON_VMRESUME] = handle_vmresume,
6416 [EXIT_REASON_VMWRITE] = handle_vmwrite,
6417 [EXIT_REASON_VMOFF] = handle_vmoff,
6418 [EXIT_REASON_VMON] = handle_vmon,
6419 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
6420 [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
6421 [EXIT_REASON_APIC_WRITE] = handle_apic_write,
6422 [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced,
6423 [EXIT_REASON_WBINVD] = handle_wbinvd,
6424 [EXIT_REASON_XSETBV] = handle_xsetbv,
6425 [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
6426 [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
6427 [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
6428 [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
6429 [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
6430 [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
6431 [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
6432 [EXIT_REASON_INVEPT] = handle_invept,
6433};
6434
6435static const int kvm_vmx_max_exit_handlers =
6436 ARRAY_SIZE(kvm_vmx_exit_handlers);
6437
6438static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
6439 struct vmcs12 *vmcs12)
6440{
6441 unsigned long exit_qualification;
6442 gpa_t bitmap, last_bitmap;
6443 unsigned int port;
6444 int size;
6445 u8 b;
6446
6447 if (nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING))
6448 return 1;
6449
6450 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
6451 return 0;
6452
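	/*
	 * I/O bitmap A covers ports 0x0000-0x7fff, bitmap B covers
	 * 0x8000-0xffff, one bit per port.
	 */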
6453 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6454
6455 port = exit_qualification >> 16;
6456 size = (exit_qualification & 7) + 1;
6457
6458 last_bitmap = (gpa_t)-1;
6459 b = -1;
6460
6461 while (size > 0) {
6462 if (port < 0x8000)
6463 bitmap = vmcs12->io_bitmap_a;
6464 else if (port < 0x10000)
6465 bitmap = vmcs12->io_bitmap_b;
6466 else
6467 return 1;
6468 bitmap += (port & 0x7fff) / 8;
6469
6470 if (last_bitmap != bitmap)
6471 if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1))
6472 return 1;
6473 if (b & (1 << (port & 7)))
6474 return 1;
6475
6476 port++;
6477 size--;
6478 last_bitmap = bitmap;
6479 }
6480
6481 return 0;
6482}
6483
/*
 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
 * disinterest in the current event (read or write a specific MSR) by using
 * an MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
 */
6490static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
6491 struct vmcs12 *vmcs12, u32 exit_reason)
6492{
6493 u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
6494 gpa_t bitmap;
6495
6496 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
6497 return 1;
6498
	/*
	 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
	 * for the four combinations of read/write and low/high MSR numbers.
	 * First we need to figure out which of the four to use:
	 */
6504 bitmap = vmcs12->msr_bitmap;
6505 if (exit_reason == EXIT_REASON_MSR_WRITE)
6506 bitmap += 2048;
6507 if (msr_index >= 0xc0000000) {
6508 msr_index -= 0xc0000000;
6509 bitmap += 1024;
6510 }
6511
	/* Then read the msr_index'th bit from this bitmap: */
6513 if (msr_index < 1024*8) {
6514 unsigned char b;
6515 if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1))
6516 return 1;
6517 return 1 & (b >> (msr_index & 7));
6518 } else
6519 return 1;
6520}
6521
/*
 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
 * intercept (via guest_host_mask etc.) the current event.
 */
6527static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
6528 struct vmcs12 *vmcs12)
6529{
6530 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6531 int cr = exit_qualification & 15;
6532 int reg = (exit_qualification >> 8) & 15;
6533 unsigned long val = kvm_register_read(vcpu, reg);
6534
6535 switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
6537 switch (cr) {
6538 case 0:
6539 if (vmcs12->cr0_guest_host_mask &
6540 (val ^ vmcs12->cr0_read_shadow))
6541 return 1;
6542 break;
6543 case 3:
6544 if ((vmcs12->cr3_target_count >= 1 &&
6545 vmcs12->cr3_target_value0 == val) ||
6546 (vmcs12->cr3_target_count >= 2 &&
6547 vmcs12->cr3_target_value1 == val) ||
6548 (vmcs12->cr3_target_count >= 3 &&
6549 vmcs12->cr3_target_value2 == val) ||
6550 (vmcs12->cr3_target_count >= 4 &&
6551 vmcs12->cr3_target_value3 == val))
6552 return 0;
6553 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
6554 return 1;
6555 break;
6556 case 4:
6557 if (vmcs12->cr4_guest_host_mask &
6558 (vmcs12->cr4_read_shadow ^ val))
6559 return 1;
6560 break;
6561 case 8:
6562 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
6563 return 1;
6564 break;
6565 }
6566 break;
	case 2: /* clts */
6568 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
6569 (vmcs12->cr0_read_shadow & X86_CR0_TS))
6570 return 1;
6571 break;
	case 1: /* mov from cr */
6573 switch (cr) {
6574 case 3:
6575 if (vmcs12->cpu_based_vm_exec_control &
6576 CPU_BASED_CR3_STORE_EXITING)
6577 return 1;
6578 break;
6579 case 8:
6580 if (vmcs12->cpu_based_vm_exec_control &
6581 CPU_BASED_CR8_STORE_EXITING)
6582 return 1;
6583 break;
6584 }
6585 break;
	case 3: /* lmsw */
		/*
		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
		 * cr0. Other attempted changes are ignored, with no exit.
		 */
6591 if (vmcs12->cr0_guest_host_mask & 0xe &
6592 (val ^ vmcs12->cr0_read_shadow))
6593 return 1;
6594 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
6595 !(vmcs12->cr0_read_shadow & 0x1) &&
6596 (val & 0x1))
6597 return 1;
6598 break;
6599 }
6600 return 0;
6601}
6602
/*
 * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
 * should handle it ourselves in L0 (and then continue L2). Only call this
 * when in is_guest_mode (L2).
 */
6608static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
6609{
6610 u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
6611 struct vcpu_vmx *vmx = to_vmx(vcpu);
6612 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6613 u32 exit_reason = vmx->exit_reason;
6614
6615 if (vmx->nested.nested_run_pending)
6616 return 0;
6617
6618 if (unlikely(vmx->fail)) {
6619 pr_info_ratelimited("%s failed vm entry %x\n", __func__,
6620 vmcs_read32(VM_INSTRUCTION_ERROR));
6621 return 1;
6622 }
6623
6624 switch (exit_reason) {
6625 case EXIT_REASON_EXCEPTION_NMI:
6626 if (!is_exception(intr_info))
6627 return 0;
6628 else if (is_page_fault(intr_info))
6629 return enable_ept;
6630 return vmcs12->exception_bitmap &
6631 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
6632 case EXIT_REASON_EXTERNAL_INTERRUPT:
6633 return 0;
6634 case EXIT_REASON_TRIPLE_FAULT:
6635 return 1;
6636 case EXIT_REASON_PENDING_INTERRUPT:
6637 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
6638 case EXIT_REASON_NMI_WINDOW:
6639 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
6640 case EXIT_REASON_TASK_SWITCH:
6641 return 1;
6642 case EXIT_REASON_CPUID:
6643 return 1;
6644 case EXIT_REASON_HLT:
6645 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
6646 case EXIT_REASON_INVD:
6647 return 1;
6648 case EXIT_REASON_INVLPG:
6649 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6650 case EXIT_REASON_RDPMC:
6651 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
6652 case EXIT_REASON_RDTSC:
6653 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
6654 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
6655 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
6656 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
6657 case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
6658 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
6659 case EXIT_REASON_INVEPT:
		/*
		 * VMX instructions executed by L2 trap unconditionally, and
		 * are always reflected to L1 for handling.
		 */
6664 return 1;
6665 case EXIT_REASON_CR_ACCESS:
6666 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
6667 case EXIT_REASON_DR_ACCESS:
6668 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
6669 case EXIT_REASON_IO_INSTRUCTION:
6670 return nested_vmx_exit_handled_io(vcpu, vmcs12);
6671 case EXIT_REASON_MSR_READ:
6672 case EXIT_REASON_MSR_WRITE:
6673 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
6674 case EXIT_REASON_INVALID_STATE:
6675 return 1;
6676 case EXIT_REASON_MWAIT_INSTRUCTION:
6677 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
6678 case EXIT_REASON_MONITOR_INSTRUCTION:
6679 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
6680 case EXIT_REASON_PAUSE_INSTRUCTION:
6681 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
6682 nested_cpu_has2(vmcs12,
6683 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
6684 case EXIT_REASON_MCE_DURING_VMENTRY:
6685 return 0;
6686 case EXIT_REASON_TPR_BELOW_THRESHOLD:
6687 return 1;
6688 case EXIT_REASON_APIC_ACCESS:
6689 return nested_cpu_has2(vmcs12,
6690 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
6691 case EXIT_REASON_EPT_VIOLATION:
		/*
		 * L0 always deals with the EPT violation. If nested EPT is
		 * used, and the nested mmu code discovers that the address is
		 * missing in the guest EPT table (EPT12), the EPT violation
		 * will be injected with nested_ept_inject_page_fault().
		 */
6698 return 0;
6699 case EXIT_REASON_EPT_MISCONFIG:
		/*
		 * L2 never uses directly L1's EPT, but rather L0's own EPT
		 * table (shadow on EPT) or a merged EPT table that L0 built
		 * (EPT on EPT). So any problems with the structure of the
		 * table is L0's fault.
		 */
6706 return 0;
6707 case EXIT_REASON_PREEMPTION_TIMER:
6708 return vmcs12->pin_based_vm_exec_control &
6709 PIN_BASED_VMX_PREEMPTION_TIMER;
6710 case EXIT_REASON_WBINVD:
6711 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
6712 case EXIT_REASON_XSETBV:
6713 return 1;
6714 default:
6715 return 1;
6716 }
6717}
6718
6719static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
6720{
6721 *info1 = vmcs_readl(EXIT_QUALIFICATION);
6722 *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
6723}
6724
/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
6729static int vmx_handle_exit(struct kvm_vcpu *vcpu)
6730{
6731 struct vcpu_vmx *vmx = to_vmx(vcpu);
6732 u32 exit_reason = vmx->exit_reason;
6733 u32 vectoring_info = vmx->idt_vectoring_info;
6734
	/* If guest state is invalid, start emulating */
6736 if (vmx->emulation_required)
6737 return handle_invalid_guest_state(vcpu);
6738
	/*
	 * The KVM_REQ_EVENT optimization bit is only on for one entry, and if
	 * we did not inject a still-pending event to L1 now because of
	 * nested_run_pending, we need to re-enable this bit.
	 */
6744 if (vmx->nested.nested_run_pending)
6745 kvm_make_request(KVM_REQ_EVENT, vcpu);
6746
6747 if (!is_guest_mode(vcpu) && (exit_reason == EXIT_REASON_VMLAUNCH ||
6748 exit_reason == EXIT_REASON_VMRESUME))
6749 vmx->nested.nested_run_pending = 1;
6750 else
6751 vmx->nested.nested_run_pending = 0;
6752
6753 if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
6754 nested_vmx_vmexit(vcpu);
6755 return 1;
6756 }
6757
6758 if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
6759 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6760 vcpu->run->fail_entry.hardware_entry_failure_reason
6761 = exit_reason;
6762 return 0;
6763 }
6764
6765 if (unlikely(vmx->fail)) {
6766 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6767 vcpu->run->fail_entry.hardware_entry_failure_reason
6768 = vmcs_read32(VM_INSTRUCTION_ERROR);
6769 return 0;
6770 }
6771
	/*
	 * Note:
	 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by a
	 * delivery event, since it indicates the guest is accessing MMIO.
	 * The vm-exit would be triggered again after returning to the guest,
	 * causing an infinite loop.
	 */
6779 if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
6780 (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
6781 exit_reason != EXIT_REASON_EPT_VIOLATION &&
6782 exit_reason != EXIT_REASON_TASK_SWITCH)) {
6783 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6784 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
6785 vcpu->run->internal.ndata = 2;
6786 vcpu->run->internal.data[0] = vectoring_info;
6787 vcpu->run->internal.data[1] = exit_reason;
6788 return 0;
6789 }
6790
6791 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
6792 !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
6793 get_vmcs12(vcpu))))) {
6794 if (vmx_interrupt_allowed(vcpu)) {
6795 vmx->soft_vnmi_blocked = 0;
6796 } else if (vmx->vnmi_blocked_time > 1000000000LL &&
6797 vcpu->arch.nmi_pending) {
			/*
			 * This CPU doesn't support us in finding the end of an
			 * NMI-blocked window if the guest runs with IRQs
			 * disabled. So we pull the trigger after 1 s of
			 * futile waiting, but inform the user about this.
			 */
6804 printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
6805 "state on VCPU %d after 1 s timeout\n",
6806 __func__, vcpu->vcpu_id);
6807 vmx->soft_vnmi_blocked = 0;
6808 }
6809 }
6810
6811 if (exit_reason < kvm_vmx_max_exit_handlers
6812 && kvm_vmx_exit_handlers[exit_reason])
6813 return kvm_vmx_exit_handlers[exit_reason](vcpu);
6814 else {
6815 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
6816 vcpu->run->hw.hardware_exit_reason = exit_reason;
6817 }
6818 return 0;
6819}
6820
6821static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
6822{
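	/*
	 * When no interrupt is pending (irr == -1), or the pending one is
	 * already deliverable at the current TPR (tpr < irr), there is no
	 * need to trap TPR updates. Otherwise, exit once the guest lowers
	 * its TPR below the priority of the pending interrupt, so that it
	 * can be injected.
	 */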
6823 if (irr == -1 || tpr < irr) {
6824 vmcs_write32(TPR_THRESHOLD, 0);
6825 return;
6826 }
6827
6828 vmcs_write32(TPR_THRESHOLD, irr);
6829}
6830
6831static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
6832{
6833 u32 sec_exec_control;
6834
	/*
	 * There is no point in enabling virtualized x2APIC mode
	 * without APIC virtualization (apicv) support.
	 */
6839 if (!cpu_has_vmx_virtualize_x2apic_mode() ||
6840 !vmx_vm_has_apicv(vcpu->kvm))
6841 return;
6842
6843 if (!vm_need_tpr_shadow(vcpu->kvm))
6844 return;
6845
6846 sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
6847
6848 if (set) {
6849 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6850 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
6851 } else {
6852 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
6853 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6854 }
6855 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
6856
6857 vmx_set_msr_bitmap(vcpu);
6858}
6859
6860static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
6861{
6862 u16 status;
6863 u8 old;
6864
6865 if (!vmx_vm_has_apicv(kvm))
6866 return;
6867
6868 if (isr == -1)
6869 isr = 0;
6870
6871 status = vmcs_read16(GUEST_INTR_STATUS);
6872 old = status >> 8;
6873 if (isr != old) {
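		/* SVI occupies the upper byte of GUEST_INTR_STATUS. */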
6874 status &= 0xff;
6875 status |= isr << 8;
6876 vmcs_write16(GUEST_INTR_STATUS, status);
6877 }
6878}
6879
6880static void vmx_set_rvi(int vector)
6881{
6882 u16 status;
6883 u8 old;
6884
6885 status = vmcs_read16(GUEST_INTR_STATUS);
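	/* RVI occupies the low byte of GUEST_INTR_STATUS. */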
6886 old = (u8)status & 0xff;
6887 if ((u8)vector != old) {
6888 status &= ~0xff;
6889 status |= (u8)vector;
6890 vmcs_write16(GUEST_INTR_STATUS, status);
6891 }
6892}
6893
6894static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
6895{
6896 if (max_irr == -1)
6897 return;
6898
6899 vmx_set_rvi(max_irr);
6900}
6901
6902static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
6903{
6904 if (!vmx_vm_has_apicv(vcpu->kvm))
6905 return;
6906
6907 vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
6908 vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
6909 vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
6910 vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
6911}
6912
6913static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
6914{
6915 u32 exit_intr_info;
6916
6917 if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
6918 || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI))
6919 return;
6920
6921 vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
6922 exit_intr_info = vmx->exit_intr_info;
6923
	/* Handle machine checks before interrupts are enabled */
6925 if (is_machine_check(exit_intr_info))
6926 kvm_machine_check();
6927
	/* We need to handle NMIs before interrupts are enabled */
6929 if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
6930 (exit_intr_info & INTR_INFO_VALID_MASK)) {
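		/* Re-raise the NMI so the host handles it via its own IDT entry. */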
6931 kvm_before_handle_nmi(&vmx->vcpu);
6932 asm("int $2");
6933 kvm_after_handle_nmi(&vmx->vcpu);
6934 }
6935}
6936
6937static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
6938{
6939 u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
6940
	/*
	 * If this exit was caused by an external interrupt, the IF flag is
	 * set in the EFLAGS image pushed on the interrupt stack frame, so
	 * interrupts are re-enabled when the handler we call below returns.
	 */
6946 if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
6947 == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
6948 unsigned int vector;
6949 unsigned long entry;
6950 gate_desc *desc;
6951 struct vcpu_vmx *vmx = to_vmx(vcpu);
6952#ifdef CONFIG_X86_64
6953 unsigned long tmp;
6954#endif
6955
6956 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
6957 desc = (gate_desc *)vmx->host_idt_base + vector;
6958 entry = gate_offset(*desc);
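		/*
		 * Mimic an interrupt gate dispatch: on 64-bit, align the stack
		 * and push SS:RSP; then push an RFLAGS image with IF set (so
		 * the handler's iret re-enables interrupts) and CS, and call
		 * the handler, which itself pushes the return RIP.
		 */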
6959 asm volatile(
6960#ifdef CONFIG_X86_64
6961 "mov %%" _ASM_SP ", %[sp]\n\t"
6962 "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
6963 "push $%c[ss]\n\t"
6964 "push %[sp]\n\t"
6965#endif
6966 "pushf\n\t"
6967 "orl $0x200, (%%" _ASM_SP ")\n\t"
6968 __ASM_SIZE(push) " $%c[cs]\n\t"
6969 "call *%[entry]\n\t"
6970 :
6971#ifdef CONFIG_X86_64
6972 [sp]"=&r"(tmp)
6973#endif
6974 :
6975 [entry]"r"(entry),
6976 [ss]"i"(__KERNEL_DS),
6977 [cs]"i"(__KERNEL_CS)
6978 );
6979 } else
6980 local_irq_enable();
6981}
6982
6983static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
6984{
6985 u32 exit_intr_info;
6986 bool unblock_nmi;
6987 u8 vector;
6988 bool idtv_info_valid;
6989
6990 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
6991
6992 if (cpu_has_virtual_nmis()) {
6993 if (vmx->nmi_known_unmasked)
6994 return;
6995
		/*
		 * Can't use vmx->exit_intr_info since we're not sure what
		 * the exit reason is.
		 */
6999 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
7000 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
7001 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
7002
		/*
		 * SDM 3: 27.7.1.2 (September 2008)
		 * Re-set bit "block by NMI" before VM entry if vmexit caused by
		 * a guest IRET fault.
		 * SDM 3: 23.2.2 (September 2008)
		 * Bit 12 is undefined in any of the following cases:
		 *  If the VM exit sets the valid bit in the IDT-vectoring
		 *   information field.
		 *  If the VM exit is due to a double fault.
		 */
7012 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
7013 vector != DF_VECTOR && !idtv_info_valid)
7014 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
7015 GUEST_INTR_STATE_NMI);
7016 else
7017 vmx->nmi_known_unmasked =
7018 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
7019 & GUEST_INTR_STATE_NMI);
7020 } else if (unlikely(vmx->soft_vnmi_blocked))
7021 vmx->vnmi_blocked_time +=
7022 ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
7023}
7024
7025static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
7026 u32 idt_vectoring_info,
7027 int instr_len_field,
7028 int error_code_field)
7029{
7030 u8 vector;
7031 int type;
7032 bool idtv_info_valid;
7033
7034 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
7035
7036 vcpu->arch.nmi_injected = false;
7037 kvm_clear_exception_queue(vcpu);
7038 kvm_clear_interrupt_queue(vcpu);
7039
7040 if (!idtv_info_valid)
7041 return;
7042
7043 kvm_make_request(KVM_REQ_EVENT, vcpu);
7044
7045 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
7046 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
7047
7048 switch (type) {
7049 case INTR_TYPE_NMI_INTR:
7050 vcpu->arch.nmi_injected = true;
		/*
		 * SDM 3: 27.7.1.2 (September 2008)
		 * Clear bit "block by NMI" before VM entry if a NMI
		 * delivery faulted.
		 */
7056 vmx_set_nmi_mask(vcpu, false);
7057 break;
7058 case INTR_TYPE_SOFT_EXCEPTION:
7059 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
		/* fall through */
7061 case INTR_TYPE_HARD_EXCEPTION:
7062 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
7063 u32 err = vmcs_read32(error_code_field);
7064 kvm_queue_exception_e(vcpu, vector, err);
7065 } else
7066 kvm_queue_exception(vcpu, vector);
7067 break;
7068 case INTR_TYPE_SOFT_INTR:
7069 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
		/* fall through */
7071 case INTR_TYPE_EXT_INTR:
7072 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
7073 break;
7074 default:
7075 break;
7076 }
7077}
7078
7079static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
7080{
7081 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
7082 VM_EXIT_INSTRUCTION_LEN,
7083 IDT_VECTORING_ERROR_CODE);
7084}
7085
7086static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
7087{
7088 __vmx_complete_interrupts(vcpu,
7089 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
7090 VM_ENTRY_INSTRUCTION_LEN,
7091 VM_ENTRY_EXCEPTION_ERROR_CODE);
7092
7093 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
7094}
7095
7096static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
7097{
7098 int i, nr_msrs;
7099 struct perf_guest_switch_msr *msrs;
7100
7101 msrs = perf_guest_get_msrs(&nr_msrs);
7102
7103 if (!msrs)
7104 return;

	/*
	 * Atomically switch only the MSRs whose guest value differs from the
	 * host value; identical values need no save/restore across entry.
	 */
7106 for (i = 0; i < nr_msrs; i++)
7107 if (msrs[i].host == msrs[i].guest)
7108 clear_atomic_switch_msr(vmx, msrs[i].msr);
7109 else
7110 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
7111 msrs[i].host);
7112}
7113
7114static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
7115{
7116 struct vcpu_vmx *vmx = to_vmx(vcpu);
7117 unsigned long debugctlmsr;
7118
	/* Record the guest's net vcpu time for enforced NMI injections. */
7120 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
7121 vmx->entry_time = ktime_get();
7122
	/*
	 * Don't enter VMX if guest state is invalid; let the exit handler
	 * start emulation until we arrive back at a valid state.
	 */
7125 if (vmx->emulation_required)
7126 return;
7127
7128 if (vmx->nested.sync_shadow_vmcs) {
7129 copy_vmcs12_to_shadow(vmx);
7130 vmx->nested.sync_shadow_vmcs = false;
7131 }
7132
7133 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
7134 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
7135 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
7136 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
7137
	/*
	 * When single-stepping over STI and MOV SS, we must clear the
	 * corresponding interruptibility bits in the guest state. Otherwise
	 * vmentry fails as it then expects bit 14 (BS) in pending debug
	 * exceptions being set, but that's not correct for the guest debugging
	 * case.
	 */
7143 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
7144 vmx_set_interrupt_shadow(vcpu, 0);
7145
7146 atomic_switch_perf_msrs(vmx);
7147 debugctlmsr = get_debugctlmsr();
7148
7149 vmx->__launched = vmx->loaded_vmcs->launched;
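	/*
	 * Enter the guest: refresh HOST_RSP in the VMCS if the stack moved,
	 * reload the guest's cr2 and registers, VMLAUNCH on the first entry
	 * to this VMCS or VMRESUME on later ones, then save the guest's
	 * registers and cr2 once the CPU returns at vmx_return.
	 */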
7150 asm(
		/* Store host registers */
7152 "push %%" _ASM_DX "; push %%" _ASM_BP ";"
7153 "push %%" _ASM_CX " \n\t"
7154 "push %%" _ASM_CX " \n\t"
7155 "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
7156 "je 1f \n\t"
7157 "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
7158 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
7159 "1: \n\t"
		/* Reload cr2 if changed */
7161 "mov %c[cr2](%0), %%" _ASM_AX " \n\t"
7162 "mov %%cr2, %%" _ASM_DX " \n\t"
7163 "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
7164 "je 2f \n\t"
7165 "mov %%" _ASM_AX", %%cr2 \n\t"
7166 "2: \n\t"
		/* Check if vmlaunch or vmresume is needed */
7168 "cmpl $0, %c[launched](%0) \n\t"
		/* Load guest registers.  Don't clobber flags. */
7170 "mov %c[rax](%0), %%" _ASM_AX " \n\t"
7171 "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
7172 "mov %c[rdx](%0), %%" _ASM_DX " \n\t"
7173 "mov %c[rsi](%0), %%" _ASM_SI " \n\t"
7174 "mov %c[rdi](%0), %%" _ASM_DI " \n\t"
7175 "mov %c[rbp](%0), %%" _ASM_BP " \n\t"
7176#ifdef CONFIG_X86_64
7177 "mov %c[r8](%0), %%r8 \n\t"
7178 "mov %c[r9](%0), %%r9 \n\t"
7179 "mov %c[r10](%0), %%r10 \n\t"
7180 "mov %c[r11](%0), %%r11 \n\t"
7181 "mov %c[r12](%0), %%r12 \n\t"
7182 "mov %c[r13](%0), %%r13 \n\t"
7183 "mov %c[r14](%0), %%r14 \n\t"
7184 "mov %c[r15](%0), %%r15 \n\t"
7185#endif
7186 "mov %c[rcx](%0), %%" _ASM_CX " \n\t"
7187
7188
7189 "jne 1f \n\t"
7190 __ex(ASM_VMX_VMLAUNCH) "\n\t"
7191 "jmp 2f \n\t"
7192 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
7193 "2: "
		/* Save guest registers, load host registers, keep flags */
7195 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
7196 "pop %0 \n\t"
7197 "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
7198 "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
7199 __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
7200 "mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
7201 "mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
7202 "mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
7203 "mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
7204#ifdef CONFIG_X86_64
7205 "mov %%r8, %c[r8](%0) \n\t"
7206 "mov %%r9, %c[r9](%0) \n\t"
7207 "mov %%r10, %c[r10](%0) \n\t"
7208 "mov %%r11, %c[r11](%0) \n\t"
7209 "mov %%r12, %c[r12](%0) \n\t"
7210 "mov %%r13, %c[r13](%0) \n\t"
7211 "mov %%r14, %c[r14](%0) \n\t"
7212 "mov %%r15, %c[r15](%0) \n\t"
7213#endif
7214 "mov %%cr2, %%" _ASM_AX " \n\t"
7215 "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
7216
7217 "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
7218 "setbe %c[fail](%0) \n\t"
7219 ".pushsection .rodata \n\t"
7220 ".global vmx_return \n\t"
7221 "vmx_return: " _ASM_PTR " 2b \n\t"
7222 ".popsection"
7223 : : "c"(vmx), "d"((unsigned long)HOST_RSP),
7224 [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
7225 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
7226 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
7227 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
7228 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
7229 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
7230 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
7231 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
7232 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
7233 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
7234#ifdef CONFIG_X86_64
7235 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
7236 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
7237 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
7238 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
7239 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
7240 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
7241 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
7242 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
7243#endif
7244 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
7245 [wordsize]"i"(sizeof(ulong))
7246 : "cc", "memory"
7247#ifdef CONFIG_X86_64
7248 , "rax", "rbx", "rdi", "rsi"
7249 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
7250#else
7251 , "eax", "ebx", "edi", "esi"
7252#endif
7253 );
7254
	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
7256 if (debugctlmsr)
7257 update_debugctlmsr(debugctlmsr);
7258
7259#ifndef CONFIG_X86_64
	/*
	 * The sysexit path does not restore ds/es, so we must set them to
	 * a reasonable value ourselves.
	 *
	 * We can't defer this to vmx_load_host_state() since that function
	 * may be executed in interrupt context, which saves and restores
	 * segments around it, nullifying its effect.
	 */
7268 loadsegment(ds, __USER_DS);
7269 loadsegment(es, __USER_DS);
7270#endif
7271
7272 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
7273 | (1 << VCPU_EXREG_RFLAGS)
7274 | (1 << VCPU_EXREG_CPL)
7275 | (1 << VCPU_EXREG_PDPTR)
7276 | (1 << VCPU_EXREG_SEGMENTS)
7277 | (1 << VCPU_EXREG_CR3));
7278 vcpu->arch.regs_dirty = 0;
7279
7280 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
7281
7282 vmx->loaded_vmcs->launched = 1;
7283
7284 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
7285 trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
7286
7287 vmx_complete_atomic_exit(vmx);
7288 vmx_recover_nmi_blocking(vmx);
7289 vmx_complete_interrupts(vmx);
7290}
7291
7292static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
7293{
7294 struct vcpu_vmx *vmx = to_vmx(vcpu);
7295
7296 free_vpid(vmx);
7297 free_nested(vmx);
7298 free_loaded_vmcs(vmx->loaded_vmcs);
7299 kfree(vmx->guest_msrs);
7300 kvm_vcpu_uninit(vcpu);
7301 kmem_cache_free(kvm_vcpu_cache, vmx);
7302}
7303
7304static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
7305{
7306 int err;
7307 struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
7308 int cpu;
7309
7310 if (!vmx)
7311 return ERR_PTR(-ENOMEM);
7312
7313 allocate_vpid(vmx);
7314
7315 err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
7316 if (err)
7317 goto free_vcpu;
7318
7319 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
7320 err = -ENOMEM;
7321 if (!vmx->guest_msrs) {
7322 goto uninit_vcpu;
7323 }
7324
7325 vmx->loaded_vmcs = &vmx->vmcs01;
7326 vmx->loaded_vmcs->vmcs = alloc_vmcs();
7327 if (!vmx->loaded_vmcs->vmcs)
7328 goto free_msrs;
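	/*
	 * loaded_vmcs_init() issues VMCLEAR, which requires VMX operation to
	 * be enabled; when KVM does not keep VMX on at all times
	 * (!vmm_exclusive), enable it just around the initialization.
	 */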
7329 if (!vmm_exclusive)
7330 kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
7331 loaded_vmcs_init(vmx->loaded_vmcs);
7332 if (!vmm_exclusive)
7333 kvm_cpu_vmxoff();
7334
7335 cpu = get_cpu();
7336 vmx_vcpu_load(&vmx->vcpu, cpu);
7337 vmx->vcpu.cpu = cpu;
7338 err = vmx_vcpu_setup(vmx);
7339 vmx_vcpu_put(&vmx->vcpu);
7340 put_cpu();
7341 if (err)
7342 goto free_vmcs;
7343 if (vm_need_virtualize_apic_accesses(kvm)) {
7344 err = alloc_apic_access_page(kvm);
7345 if (err)
7346 goto free_vmcs;
7347 }
7348
7349 if (enable_ept) {
7350 if (!kvm->arch.ept_identity_map_addr)
7351 kvm->arch.ept_identity_map_addr =
7352 VMX_EPT_IDENTITY_PAGETABLE_ADDR;
7353 err = -ENOMEM;
7354 if (alloc_identity_pagetable(kvm) != 0)
7355 goto free_vmcs;
7356 if (!init_rmode_identity_map(kvm))
7357 goto free_vmcs;
7358 }
7359
7360 vmx->nested.current_vmptr = -1ull;
7361 vmx->nested.current_vmcs12 = NULL;
7362
7363 return &vmx->vcpu;
7364
7365free_vmcs:
7366 free_loaded_vmcs(vmx->loaded_vmcs);
7367free_msrs:
7368 kfree(vmx->guest_msrs);
7369uninit_vcpu:
7370 kvm_vcpu_uninit(&vmx->vcpu);
7371free_vcpu:
7372 free_vpid(vmx);
7373 kmem_cache_free(kvm_vcpu_cache, vmx);
7374 return ERR_PTR(err);
7375}
7376
7377static void __init vmx_check_processor_compat(void *rtn)
7378{
7379 struct vmcs_config vmcs_conf;
7380
7381 *(int *)rtn = 0;
7382 if (setup_vmcs_config(&vmcs_conf) < 0)
7383 *(int *)rtn = -EIO;
7384 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
7385 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
7386 smp_processor_id());
7387 *(int *)rtn = -EIO;
7388 }
7389}
7390
7391static int get_ept_level(void)
7392{
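	/* The default guest address width of 48 bits implies 4-level EPT. */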
7393 return VMX_EPT_DEFAULT_GAW + 1;
7394}
7395
7396static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
7397{
7398 u64 ret;
7399
	/*
	 * For EPT and VT-d combinations:
	 * 1. MMIO: always map as UC
	 * 2. EPT with VT-d:
	 *   a. VT-d without snooping control feature: can't guarantee the
	 *      result, try to trust guest.
	 *   b. VT-d with snooping control feature: snooping control of the
	 *      VT-d engine can guarantee cache correctness. Just set it
	 *      to WB to keep consistent with host. So the same as item 3.
	 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
	 *    consistent with host MTRR.
	 */
7411 if (is_mmio)
7412 ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
7413 else if (vcpu->kvm->arch.iommu_domain &&
7414 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
7415 ret = kvm_get_guest_memory_type(vcpu, gfn) <<
7416 VMX_EPT_MT_EPTE_SHIFT;
7417 else
7418 ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
7419 | VMX_EPT_IPAT_BIT;
7420
7421 return ret;
7422}
7423
7424static int vmx_get_lpage_level(void)
7425{
7426 if (enable_ept && !cpu_has_vmx_ept_1g_page())
7427 return PT_DIRECTORY_LEVEL;
7428 else
		/* 1GB pages are supported with both shadow paging and EPT */
7430 return PT_PDPE_LEVEL;
7431}
7432
7433static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
7434{
7435 struct kvm_cpuid_entry2 *best;
7436 struct vcpu_vmx *vmx = to_vmx(vcpu);
7437 u32 exec_control;
7438
7439 vmx->rdtscp_enabled = false;
7440 if (vmx_rdtscp_supported()) {
7441 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
7442 if (exec_control & SECONDARY_EXEC_RDTSCP) {
7443 best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
7444 if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
7445 vmx->rdtscp_enabled = true;
7446 else {
7447 exec_control &= ~SECONDARY_EXEC_RDTSCP;
7448 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
7449 exec_control);
7450 }
7451 }
7452 }
7453
	/* Exposing INVPCID only when PCID is exposed */
7455 best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
7456 if (vmx_invpcid_supported() &&
7457 best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
7458 guest_cpuid_has_pcid(vcpu)) {
7459 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
7460 exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
7461 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
7462 exec_control);
7463 } else {
7464 if (cpu_has_secondary_exec_ctrls()) {
7465 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
7466 exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
7467 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
7468 exec_control);
7469 }
7470 if (best)
7471 best->ebx &= ~bit(X86_FEATURE_INVPCID);
7472 }
7473}
7474
7475static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
7476{
7477 if (func == 1 && nested)
7478 entry->ecx |= bit(X86_FEATURE_VMX);
7479}
7480
7481static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
7482 struct x86_exception *fault)
7483{
7484 struct vmcs12 *vmcs12;
7485 nested_vmx_vmexit(vcpu);
7486 vmcs12 = get_vmcs12(vcpu);
7487
7488 if (fault->error_code & PFERR_RSVD_MASK)
7489 vmcs12->vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
7490 else
7491 vmcs12->vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
7492 vmcs12->exit_qualification = vcpu->arch.exit_qualification;
7493 vmcs12->guest_physical_address = fault->address;
7494}
7495
/* Callbacks for nested_ept_init_mmu_context: */

7498static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
7499{
	/* return the page table to be shadowed - in our case, EPT12 */
7501 return get_vmcs12(vcpu)->ept_pointer;
7502}
7503
7504static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
7505{
7506 int r = kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
7507 nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT);
7508
7509 vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
7510 vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
7511 vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
7512
7513 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
7514
7515 return r;
7516}
7517
7518static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
7519{
7520 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
7521}
7522
/*
 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
 * with L0's requirements for its own guest (vmcs01), and puts the result
 * in the vmcs that will actually run L2 (vmcs02). In addition to modifying
 * the active vmcs, this function has side effects such as setting various
 * vcpu->arch fields.
 */
7532static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7533{
7534 struct vcpu_vmx *vmx = to_vmx(vcpu);
7535 u32 exec_control;
7536
7537 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
7538 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
7539 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
7540 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
7541 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
7542 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
7543 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
7544 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
7545 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
7546 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
7547 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
7548 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
7549 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
7550 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
7551 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
7552 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
7553 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
7554 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
7555 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
7556 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
7557 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
7558 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
7559 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
7560 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
7561 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
7562 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
7563 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
7564 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
7565 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
7566 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
7567 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
7568 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
7569 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
7570 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
7571 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
7572 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
7573
7574 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
7575 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
7576 vmcs12->vm_entry_intr_info_field);
7577 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
7578 vmcs12->vm_entry_exception_error_code);
7579 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
7580 vmcs12->vm_entry_instruction_len);
7581 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
7582 vmcs12->guest_interruptibility_info);
7583 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
7584 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
7585 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
7586 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
7587 vmcs12->guest_pending_dbg_exceptions);
7588 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
7589 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
7590
7591 vmcs_write64(VMCS_LINK_POINTER, -1ull);
7592
7593 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
7594 (vmcs_config.pin_based_exec_ctrl |
7595 vmcs12->pin_based_vm_exec_control));
7596
7597 if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER)
7598 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE,
7599 vmcs12->vmx_preemption_timer_value);
7600
	/*
	 * Whether page-faults are trapped is determined by a combination of
	 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
	 * If enable_ept, L0 doesn't care about page faults and we should
	 * set all of these to L1's desires. However, if !enable_ept, L0 does
	 * care about (at least some) page faults, and because it is not easy
	 * (if at all possible?) to merge L0 and L1's desires, we simply ask
	 * to exit on each and every L2 page fault. This is done by setting
	 * MASK=MATCH=0 and (see below) EB.PF=1.
	 * Note that below we don't need special code to set EB.PF beyond the
	 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
	 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
	 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
	 */
7621 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
7622 enable_ept ? vmcs12->page_fault_error_code_mask : 0);
7623 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
7624 enable_ept ? vmcs12->page_fault_error_code_match : 0);
7625
7626 if (cpu_has_secondary_exec_ctrls()) {
7627 u32 exec_control = vmx_secondary_exec_control(vmx);
7628 if (!vmx->rdtscp_enabled)
7629 exec_control &= ~SECONDARY_EXEC_RDTSCP;
7630
7631 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
7632 if (nested_cpu_has(vmcs12,
7633 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
7634 exec_control |= vmcs12->secondary_vm_exec_control;
7635
7636 if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
			/*
			 * Translate L1's physical address for the APIC-access
			 * page to a host physical address for vmcs02, and keep
			 * a reference to the page so it stays pinned and the
			 * address remains valid until we release it.
			 */
7643 if (vmx->nested.apic_access_page)
7644 nested_release_page(vmx->nested.apic_access_page);
7645 vmx->nested.apic_access_page =
7646 nested_get_page(vcpu, vmcs12->apic_access_addr);
7647
			/*
			 * If translation failed, no matter: this feature asks
			 * to exit when accessing the given address, and if it
			 * can't be accessed, this exit is as good as any.
			 */
7653 if (!vmx->nested.apic_access_page)
7654 exec_control &=
7655 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
7656 else
7657 vmcs_write64(APIC_ACCESS_ADDR,
7658 page_to_phys(vmx->nested.apic_access_page));
7659 }
7660
7661 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
7662 }
7663
	/*
	 * Set host-state according to L0's settings (vmcs12 is irrelevant
	 * here). The constant host-state fields are set below; the fields
	 * that differ per physical CPU are set when vmx_vcpu_load() runs.
	 */
7671 vmx_set_constant_host_state(vmx);
7672
	/*
	 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
	 * entry, but only if the current (host) sp changed from the value
	 * we wrote last (vmx->host_rsp). This cache is no longer relevant
	 * if we switch vmcs, and rather than hold a separate cache per vmcs,
	 * here we just force the write to happen on entry.
	 */
7680 vmx->host_rsp = 0;
7681
7682 exec_control = vmx_exec_control(vmx);
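	/*
	 * Drop L0's interrupt/NMI-window requests and its TPR shadow from
	 * the vmcs02 controls; whatever L1 requested is OR'ed in below.
	 */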
7683 exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
7684 exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
7685 exec_control &= ~CPU_BASED_TPR_SHADOW;
7686 exec_control |= vmcs12->cpu_based_vm_exec_control;
7687
	/*
	 * Merging of IO and MSR bitmaps is not currently supported.
	 * Rather, exit on every I/O unconditionally.
	 */
7691 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
7692 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
7693 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
7694
7695 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
7696
	/*
	 * EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
	 * bitwise-or of what L1 wants to trap for L2, and what we want to
	 * trap. Note that CR0.TS also needs updating - we do this later.
	 */
7701 update_exception_bitmap(vcpu);
7702 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
7703 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
7704
	/*
	 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
	 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
	 * bits are further modified by vmx_set_efer() below.
	 */
7709 vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
7710
	/*
	 * vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
	 * emulated by vmx_set_efer(), below.
	 */
7714 vmcs_write32(VM_ENTRY_CONTROLS,
7715 (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
7716 ~VM_ENTRY_IA32E_MODE) |
7717 (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
7718
7719 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) {
7720 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
7721 vcpu->arch.pat = vmcs12->guest_ia32_pat;
7722 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
7723 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
7724
7725
7726 set_cr4_guest_host_mask(vmx);
7727
7728 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
7729 vmcs_write64(TSC_OFFSET,
7730 vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
7731 else
7732 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
7733
7734 if (enable_vpid) {
		/*
		 * Trivially support vpid by letting L2s share their parent
		 * L1's vpid. TODO: move to a more elaborate solution, giving
		 * each L2 its own vpid and exposing the vpid feature to L1.
		 */
7740 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
7741 vmx_flush_tlb(vcpu);
7742 }
7743
7744 if (nested_cpu_has_ept(vmcs12)) {
7745 kvm_mmu_unload(vcpu);
7746 nested_ept_init_mmu_context(vcpu);
7747 }
7748
7749 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
7750 vcpu->arch.efer = vmcs12->guest_ia32_efer;
7751 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
7752 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
7753 else
7754 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
7755
7756 vmx_set_efer(vcpu, vcpu->arch.efer);
7757
	/*
	 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly with a modified
	 * TS bit (for lazy fpu) and an apparently required VMXE bit.
	 * The CR0_READ_SHADOW is what L2 should expect to read given L1's
	 * specifications; it is not enough to take vmcs12->cr0_read_shadow,
	 * because our cr0_guest_host_mask covers more bits than L1 expected.
	 */
7766 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
7767 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
7768
7769 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
7770 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
7771
	/* shadow page tables on either EPT or shadow page tables */
7773 kvm_set_cr3(vcpu, vmcs12->guest_cr3);
7774 kvm_mmu_reset_context(vcpu);
7775
	/*
	 * For a PAE L2 guest under EPT, the PDPTEs come from vmcs12 rather
	 * than being reloaded from the guest's page tables.
	 */
7779 if (enable_ept) {
7780 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
7781 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
7782 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
7783 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
7784 }
7785
7786 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
7787 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
7788}
7789
/*
 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
 * for running an L2 nested guest.
 */
7794static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
7795{
7796 struct vmcs12 *vmcs12;
7797 struct vcpu_vmx *vmx = to_vmx(vcpu);
7798 int cpu;
7799 struct loaded_vmcs *vmcs02;
7800 bool ia32e;
7801
7802 if (!nested_vmx_check_permission(vcpu) ||
7803 !nested_vmx_check_vmcs12(vcpu))
7804 return 1;
7805
7806 skip_emulated_instruction(vcpu);
7807 vmcs12 = get_vmcs12(vcpu);
7808
7809 if (enable_shadow_vmcs)
7810 copy_shadow_to_vmcs12(vmx);
7811
	/*
	 * The nested entry process starts with enforcing various prerequisites
	 * on vmcs12 as required by the Intel SDM, and acting appropriately when
	 * they fail: as the SDM explains, some conditions should cause the
	 * instruction to fail, while others will cause the instruction to seem
	 * to succeed, but return an EXIT_REASON_INVALID_STATE.
	 * To speed up the normal (success) code path, we should avoid checking
	 * for misconfigurations which will anyway be caught by the processor
	 * when using the merged vmcs02.
	 */
7822 if (vmcs12->launch_state == launch) {
7823 nested_vmx_failValid(vcpu,
7824 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
7825 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
7826 return 1;
7827 }
7828
7829 if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE) {
7830 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
7831 return 1;
7832 }
7833
7834 if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) &&
7835 !IS_ALIGNED(vmcs12->msr_bitmap, PAGE_SIZE)) {
		/* TODO: Also verify bits beyond physical address width are 0 */
7837 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
7838 return 1;
7839 }
7840
7841 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
7842 !IS_ALIGNED(vmcs12->apic_access_addr, PAGE_SIZE)) {
		/* TODO: Also verify bits beyond physical address width are 0 */
7844 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
7845 return 1;
7846 }
7847
7848 if (vmcs12->vm_entry_msr_load_count > 0 ||
7849 vmcs12->vm_exit_msr_load_count > 0 ||
7850 vmcs12->vm_exit_msr_store_count > 0) {
7851 pr_warn_ratelimited("%s: VMCS MSR_{LOAD,STORE} unsupported\n",
7852 __func__);
7853 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
7854 return 1;
7855 }
7856
7857 if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
7858 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high) ||
7859 !vmx_control_verify(vmcs12->secondary_vm_exec_control,
7860 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) ||
7861 !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
7862 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) ||
7863 !vmx_control_verify(vmcs12->vm_exit_controls,
7864 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high) ||
7865 !vmx_control_verify(vmcs12->vm_entry_controls,
7866 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high))
7867 {
7868 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
7869 return 1;
7870 }
7871
7872 if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
7873 ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
7874 nested_vmx_failValid(vcpu,
7875 VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
7876 return 1;
7877 }
7878
7879 if (((vmcs12->guest_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
7880 ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
7881 nested_vmx_entry_failure(vcpu, vmcs12,
7882 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
7883 return 1;
7884 }
7885 if (vmcs12->vmcs_link_pointer != -1ull) {
7886 nested_vmx_entry_failure(vcpu, vmcs12,
7887 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
7888 return 1;
7889 }
7890
	/*
	 * If the load IA32_EFER VM-entry control is 1, the following checks
	 * are performed on the field for the IA32_EFER MSR:
	 * - Bits reserved in the IA32_EFER MSR must be 0.
	 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
	 *   the IA-32e mode guest flag, and it must also be identical to bit 8
	 *   (LME) if bit 31 in the CR0 field (corresponding to CR0.PG) is 1.
	 */
7900 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) {
7901 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
7902 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
7903 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
7904 ((vmcs12->guest_cr0 & X86_CR0_PG) &&
7905 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
7906 nested_vmx_entry_failure(vcpu, vmcs12,
7907 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
7908 return 1;
7909 }
7910 }
7911
	/*
	 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
	 * IA32_EFER MSR must be 0 in the field for that register. In addition,
	 * the values of the LMA and LME bits in the field must each be that of
	 * the host address-space size VM-exit control.
	 */
7918 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
7919 ia32e = (vmcs12->vm_exit_controls &
7920 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
7921 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
7922 ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
7923 ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
7924 nested_vmx_entry_failure(vcpu, vmcs12,
7925 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
7926 return 1;
7927 }
7928 }
7929
	/*
	 * We're finally done with prerequisite checking, and can start with
	 * the nested entry.
	 */
7935 vmcs02 = nested_get_current_vmcs02(vmx);
7936 if (!vmcs02)
7937 return -ENOMEM;
7938
7939 enter_guest_mode(vcpu);
7940
7941 vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);

	/*
	 * Switch the active VMCS to vmcs02; the vcpu_put()/vcpu_load() pair
	 * below migrates the per-cpu VMCS state onto the new loaded_vmcs.
	 */
7943 cpu = get_cpu();
7944 vmx->loaded_vmcs = vmcs02;
7945 vmx_vcpu_put(vcpu);
7946 vmx_vcpu_load(vcpu, cpu);
7947 vcpu->cpu = cpu;
7948 put_cpu();
7949
7950 vmx_segment_cache_clear(vmx);
7951
7952 vmcs12->launch_state = 1;
7953
7954 prepare_vmcs02(vcpu, vmcs12);
7955
	/*
	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
	 * we are no longer running L1, and VMLAUNCH has no flag to report
	 * back to it; the result only becomes visible to L1 when L2 exits
	 * (see nested_vmx_vmexit()).
	 */
7962 return 1;
7963}
7964
/*
 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
 * This function returns the new value we should put in vmcs12.guest_cr0.
 * It is not enough to just return the current vmcs02 GUEST_CR0; rather:
 *  1. bits the guest owns (cr0_guest_owned_bits) may have been changed by
 *     L2 without exiting, so they are read from the vmcs02 GUEST_CR0;
 *  2. bits trapped by L1 (cr0_guest_host_mask) could not have been changed
 *     by L2, so vmcs12->guest_cr0 still holds their correct values;
 *  3. the remaining bits were trapped and emulated by L0, which maintains
 *     them in the vmcs02 CR0_READ_SHADOW.
 * vmcs12_guest_cr4() below applies the same logic to cr4.
 */
7982static inline unsigned long
7983vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7984{
7985 return
7986 (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
7987 (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
7988 (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
7989 vcpu->arch.cr0_guest_owned_bits));
7990}
7991
7992static inline unsigned long
7993vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7994{
7995 return
7996 (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
7997 (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
7998 (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
7999 vcpu->arch.cr4_guest_owned_bits));
8000}
8001
8002static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
8003 struct vmcs12 *vmcs12)
8004{
8005 u32 idt_vectoring;
8006 unsigned int nr;
8007
8008 if (vcpu->arch.exception.pending) {
8009 nr = vcpu->arch.exception.nr;
8010 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
8011
8012 if (kvm_exception_is_soft(nr)) {
8013 vmcs12->vm_exit_instruction_len =
8014 vcpu->arch.event_exit_inst_len;
8015 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
8016 } else
8017 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
8018
8019 if (vcpu->arch.exception.has_error_code) {
8020 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
8021 vmcs12->idt_vectoring_error_code =
8022 vcpu->arch.exception.error_code;
8023 }
8024
8025 vmcs12->idt_vectoring_info_field = idt_vectoring;
8026 } else if (vcpu->arch.nmi_pending) {
8027 vmcs12->idt_vectoring_info_field =
8028 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
8029 } else if (vcpu->arch.interrupt.pending) {
8030 nr = vcpu->arch.interrupt.nr;
8031 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
8032
8033 if (vcpu->arch.interrupt.soft) {
8034 idt_vectoring |= INTR_TYPE_SOFT_INTR;
8035 vmcs12->vm_entry_instruction_len =
8036 vcpu->arch.event_exit_inst_len;
8037 } else
8038 idt_vectoring |= INTR_TYPE_EXT_INTR;
8039
8040 vmcs12->idt_vectoring_info_field = idt_vectoring;
8041 }
8042}
8043
/*
 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
 * and this function updates it to reflect the changes to the guest state while
 * L2 was running (and perhaps made some exits which were handled directly by L0
 * without going back to L1), and to reflect the exit reason.
 * Note that we do not have to copy here all VMCS fields, just those that
 * could have changed by the L2 guest or the exit - i.e., the guest-state and
 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
 * which already writes to vmcs12 directly.
 */
8055static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
8056{
	/* update guest state fields: */
8058 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
8059 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
8060
8061 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
8062 vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
8063 vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
8064 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
8065
8066 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
8067 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
8068 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
8069 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
8070 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
8071 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
8072 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
8073 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
8074 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
8075 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
8076 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
8077 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
8078 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
8079 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
8080 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
8081 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
8082 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
8083 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
8084 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
8085 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
8086 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
8087 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
8088 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
8089 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
8090 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
8091 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
8092 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
8093 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
8094 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
8095 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
8096 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
8097 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
8098 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
8099 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
8100 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
8101 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
8102
8103 vmcs12->guest_interruptibility_info =
8104 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
8105 vmcs12->guest_pending_dbg_exceptions =
8106 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
8107
	/*
	 * In some cases (usually, nested EPT), L2 is allowed to change its
	 * own CR3 without exiting. If it has changed it, we must keep it.
	 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
	 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
	 *
	 * Additionally, save L2's PDPTRs back into vmcs12.
	 */
8116 if (enable_ept) {
8117 vmcs12->guest_cr3 = vmcs_read64(GUEST_CR3);
8118 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
8119 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
8120 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
8121 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
8122 }
8123
8124 vmcs12->vm_entry_controls =
8125 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
8126 (vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
8127
	/*
	 * TODO: These cannot have changed unless we have MSR bitmaps and
	 * the relevant bit asks not to trap the change.
	 */
8130 vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
8131 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
8132 vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
8133 vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
8134 vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
8135 vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
8136
8137
	/* update exit information fields: */
8139 vmcs12->vm_exit_reason = to_vmx(vcpu)->exit_reason;
8140 vmcs12->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
8141
8142 vmcs12->vm_exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
8143 if ((vmcs12->vm_exit_intr_info &
8144 (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
8145 (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK))
8146 vmcs12->vm_exit_intr_error_code =
8147 vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
8148 vmcs12->idt_vectoring_info_field = 0;
8149 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
8150 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
8151
8152 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
		/*
		 * vm_entry_intr_info_field is cleared on exit. Emulate this
		 * instead of reading the real value.
		 */
8155 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
8156
		/*
		 * Transfer the event that L0 or L1 may have wanted to inject
		 * into L2 to IDT_VECTORING_INFO_FIELD.
		 */
8161 vmcs12_save_pending_event(vcpu, vmcs12);
8162 }
8163
	/*
	 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
	 * preserved above and would only end up incorrectly in L1.
	 */
8168 vcpu->arch.nmi_injected = false;
8169 kvm_clear_exception_queue(vcpu);
8170 kvm_clear_interrupt_queue(vcpu);
8171}
8172
/*
 * A part of what we need to do when the nested L2 guest exits and we want to
 * run its L1 parent, is to reset L1's guest state to the host state specified
 * in vmcs12.
 * This function is to be called not only on normal nested exit, but also on
 * a nested entry failure, as explained in Intel's spec, 26.7 (VM-entry
 * Failures During or After Loading Guest State).
 * This function should be called when the active VMCS is L1's (vmcs01).
 */
8182static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
8183 struct vmcs12 *vmcs12)
8184{
8185 struct kvm_segment seg;
8186
8187 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
8188 vcpu->arch.efer = vmcs12->host_ia32_efer;
8189 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
8190 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
8191 else
8192 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
8193 vmx_set_efer(vcpu, vcpu->arch.efer);
8194
8195 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
8196 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
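	/* On VM exit, RFLAGS is cleared except bit 1, which is always set. */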
8197 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
8198
	/*
	 * Note that calling vmx_set_cr0 (via kvm_set_cr0) is important, even
	 * if cr0 hasn't actually changed: it depends on the current state of
	 * fpu_active (which may have changed), and it refers to the efer set
	 * above.
	 */
8204 kvm_set_cr0(vcpu, vmcs12->host_cr0);
8205
	/*
	 * If we did fpu_activate()/fpu_deactivate() during L2's run, we need
	 * to apply the same changes to L1's vmcs. We just set cr0 correctly,
	 * but we also need to update cr0_guest_host_mask and exception_bitmap.
	 */
8210 update_exception_bitmap(vcpu);
8211 vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0);
8212 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
8213
	/*
	 * Note that CR4_GUEST_HOST_MASK is already set in the original vmcs01
	 * (KVM doesn't change it) - no reason to call set_cr4_guest_host_mask().
	 */
8218 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
8219 kvm_set_cr4(vcpu, vmcs12->host_cr4);
8220
8221 if (nested_cpu_has_ept(vmcs12))
8222 nested_ept_uninit_mmu_context(vcpu);
8223
8224 kvm_set_cr3(vcpu, vmcs12->host_cr3);
8225 kvm_mmu_reset_context(vcpu);
8226
8227 if (enable_vpid) {
		/*
		 * Trivially support vpid by letting L2s share their parent
		 * L1's vpid. TODO: move to a more elaborate solution, giving
		 * each L2 its own vpid and exposing the vpid feature to L1.
		 */
8233 vmx_flush_tlb(vcpu);
8234 }
8235
8236
8237 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
8238 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
8239 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
8240 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
8241 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
8242
8243 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
8244 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
8245 vcpu->arch.pat = vmcs12->host_ia32_pat;
8246 }
8247 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
8248 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
8249 vmcs12->host_ia32_perf_global_ctrl);
8250
	/*
	 * Set L1 segment info according to Intel SDM 27.5.2 "Loading Host
	 * Segment and Descriptor-Table Registers".
	 */
8253 seg = (struct kvm_segment) {
8254 .base = 0,
8255 .limit = 0xFFFFFFFF,
8256 .selector = vmcs12->host_cs_selector,
8257 .type = 11,
8258 .present = 1,
8259 .s = 1,
8260 .g = 1
8261 };
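	/* Host CS is 64-bit (L=1) iff the host address-space size control is set. */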
8262 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
8263 seg.l = 1;
8264 else
8265 seg.db = 1;
8266 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
8267 seg = (struct kvm_segment) {
8268 .base = 0,
8269 .limit = 0xFFFFFFFF,
8270 .type = 3,
8271 .present = 1,
8272 .s = 1,
8273 .db = 1,
8274 .g = 1
8275 };
8276 seg.selector = vmcs12->host_ds_selector;
8277 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
8278 seg.selector = vmcs12->host_es_selector;
8279 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
8280 seg.selector = vmcs12->host_ss_selector;
8281 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
8282 seg.selector = vmcs12->host_fs_selector;
8283 seg.base = vmcs12->host_fs_base;
8284 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
8285 seg.selector = vmcs12->host_gs_selector;
8286 seg.base = vmcs12->host_gs_base;
8287 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
8288 seg = (struct kvm_segment) {
8289 .base = vmcs12->host_tr_base,
8290 .limit = 0x67,
8291 .selector = vmcs12->host_tr_selector,
8292 .type = 11,
8293 .present = 1
8294 };
8295 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
8296
8297 kvm_set_dr(vcpu, 7, 0x400);
8298 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
8299}
8300
/*
 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
 * and modify vmcs12 to make it see what it would expect to see there if
 * L2 was its real guest. Must only be called when in L2 (is_guest_mode()).
 */
8306static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
8307{
8308 struct vcpu_vmx *vmx = to_vmx(vcpu);
8309 int cpu;
8310 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
8311
	/* trying to cancel vmlaunch/vmresume is a bug */
8313 WARN_ON_ONCE(vmx->nested.nested_run_pending);
8314
8315 leave_guest_mode(vcpu);
8316 prepare_vmcs12(vcpu, vmcs12);

	/* Switch the active VMCS back to L1's vmcs01, as in nested_vmx_run(). */
8318 cpu = get_cpu();
8319 vmx->loaded_vmcs = &vmx->vmcs01;
8320 vmx_vcpu_put(vcpu);
8321 vmx_vcpu_load(vcpu, cpu);
8322 vcpu->cpu = cpu;
8323 put_cpu();
8324
8325 vmx_segment_cache_clear(vmx);
8326
	/* if no vmcs02 cache was requested, remove the one we used */
8328 if (VMCS02_POOL_SIZE == 0)
8329 nested_free_vmcs02(vmx, vmx->nested.current_vmptr);
8330
8331 load_vmcs12_host_state(vcpu, vmcs12);
8332
	/* Restore L1's TSC offset, which may have been adjusted while L2 ran */
8334 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
8335
	/* This is needed for the same reason as it was needed in prepare_vmcs02 */
8337 vmx->host_rsp = 0;
8338
	/* Unpin physical memory we referred to in vmcs02 */
8340 if (vmx->nested.apic_access_page) {
8341 nested_release_page(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
8343 }
8344
	/*
	 * Exiting from L2 to L1, we're now back to L1 which thinks it just
	 * finished a VMLAUNCH or VMRESUME instruction, so we need to set the
	 * success or failure flag accordingly.
	 */
8350 if (unlikely(vmx->fail)) {
8351 vmx->fail = 0;
8352 nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
8353 } else
8354 nested_vmx_succeed(vcpu);
8355 if (enable_shadow_vmcs)
8356 vmx->nested.sync_shadow_vmcs = true;
8357}
8358
/*
 * L1's failure to enter L2 is a subset of a normal exit, as explained in
 * 23.7 "VM-entry failures during or after loading guest state" (this also
 * lists the acceptable exit-reason and exit-qualification parameters).
 * It should only be called before L2 actually succeeded to run, and when
 * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss).
 */
8366static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
8367 struct vmcs12 *vmcs12,
8368 u32 reason, unsigned long qualification)
8369{
8370 load_vmcs12_host_state(vcpu, vmcs12);
8371 vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
8372 vmcs12->exit_qualification = qualification;
8373 nested_vmx_succeed(vcpu);
8374 if (enable_shadow_vmcs)
8375 to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
8376}
8377
8378static int vmx_check_intercept(struct kvm_vcpu *vcpu,
8379 struct x86_instruction_info *info,
8380 enum x86_intercept_stage stage)
8381{
8382 return X86EMUL_CONTINUE;
8383}
8384
8385static struct kvm_x86_ops vmx_x86_ops = {
8386 .cpu_has_kvm_support = cpu_has_kvm_support,
8387 .disabled_by_bios = vmx_disabled_by_bios,
8388 .hardware_setup = hardware_setup,
8389 .hardware_unsetup = hardware_unsetup,
8390 .check_processor_compatibility = vmx_check_processor_compat,
8391 .hardware_enable = hardware_enable,
8392 .hardware_disable = hardware_disable,
8393 .cpu_has_accelerated_tpr = report_flexpriority,
8394
8395 .vcpu_create = vmx_create_vcpu,
8396 .vcpu_free = vmx_free_vcpu,
8397 .vcpu_reset = vmx_vcpu_reset,
8398
8399 .prepare_guest_switch = vmx_save_host_state,
8400 .vcpu_load = vmx_vcpu_load,
8401 .vcpu_put = vmx_vcpu_put,
8402
8403 .update_db_bp_intercept = update_exception_bitmap,
8404 .get_msr = vmx_get_msr,
8405 .set_msr = vmx_set_msr,
8406 .get_segment_base = vmx_get_segment_base,
8407 .get_segment = vmx_get_segment,
8408 .set_segment = vmx_set_segment,
8409 .get_cpl = vmx_get_cpl,
8410 .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
8411 .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
8412 .decache_cr3 = vmx_decache_cr3,
8413 .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
8414 .set_cr0 = vmx_set_cr0,
8415 .set_cr3 = vmx_set_cr3,
8416 .set_cr4 = vmx_set_cr4,
8417 .set_efer = vmx_set_efer,
8418 .get_idt = vmx_get_idt,
8419 .set_idt = vmx_set_idt,
8420 .get_gdt = vmx_get_gdt,
8421 .set_gdt = vmx_set_gdt,
8422 .set_dr7 = vmx_set_dr7,
8423 .cache_reg = vmx_cache_reg,
8424 .get_rflags = vmx_get_rflags,
8425 .set_rflags = vmx_set_rflags,
8426 .fpu_activate = vmx_fpu_activate,
8427 .fpu_deactivate = vmx_fpu_deactivate,
8428
8429 .tlb_flush = vmx_flush_tlb,
8430
8431 .run = vmx_vcpu_run,
8432 .handle_exit = vmx_handle_exit,
8433 .skip_emulated_instruction = skip_emulated_instruction,
8434 .set_interrupt_shadow = vmx_set_interrupt_shadow,
8435 .get_interrupt_shadow = vmx_get_interrupt_shadow,
8436 .patch_hypercall = vmx_patch_hypercall,
8437 .set_irq = vmx_inject_irq,
8438 .set_nmi = vmx_inject_nmi,
8439 .queue_exception = vmx_queue_exception,
8440 .cancel_injection = vmx_cancel_injection,
8441 .interrupt_allowed = vmx_interrupt_allowed,
8442 .nmi_allowed = vmx_nmi_allowed,
8443 .get_nmi_mask = vmx_get_nmi_mask,
8444 .set_nmi_mask = vmx_set_nmi_mask,
8445 .enable_nmi_window = enable_nmi_window,
8446 .enable_irq_window = enable_irq_window,
8447 .update_cr8_intercept = update_cr8_intercept,
8448 .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
8449 .vm_has_apicv = vmx_vm_has_apicv,
8450 .load_eoi_exitmap = vmx_load_eoi_exitmap,
8451 .hwapic_irr_update = vmx_hwapic_irr_update,
8452 .hwapic_isr_update = vmx_hwapic_isr_update,
8453 .sync_pir_to_irr = vmx_sync_pir_to_irr,
8454 .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
8455
8456 .set_tss_addr = vmx_set_tss_addr,
8457 .get_tdp_level = get_ept_level,
8458 .get_mt_mask = vmx_get_mt_mask,
8459
8460 .get_exit_info = vmx_get_exit_info,
8461
8462 .get_lpage_level = vmx_get_lpage_level,
8463
8464 .cpuid_update = vmx_cpuid_update,
8465
8466 .rdtscp_supported = vmx_rdtscp_supported,
8467 .invpcid_supported = vmx_invpcid_supported,
8468
8469 .set_supported_cpuid = vmx_set_supported_cpuid,
8470
8471 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
8472
8473 .set_tsc_khz = vmx_set_tsc_khz,
8474 .read_tsc_offset = vmx_read_tsc_offset,
8475 .write_tsc_offset = vmx_write_tsc_offset,
8476 .adjust_tsc_offset = vmx_adjust_tsc_offset,
8477 .compute_tsc_offset = vmx_compute_tsc_offset,
8478 .read_l1_tsc = vmx_read_l1_tsc,
8479
8480 .set_tdp_cr3 = vmx_set_cr3,
8481
8482 .check_intercept = vmx_check_intercept,
8483 .handle_external_intr = vmx_handle_external_intr,
8484};
8485
8486static int __init vmx_init(void)
8487{
8488 int r, i, msr;
8489
8490 rdmsrl_safe(MSR_EFER, &host_efer);
8491
8492 for (i = 0; i < NR_VMX_MSR; ++i)
8493 kvm_define_shared_msr(i, vmx_msr_index[i]);
8494
8495 vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
8496 if (!vmx_io_bitmap_a)
8497 return -ENOMEM;
8498
8499 r = -ENOMEM;
8500
8501 vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
8502 if (!vmx_io_bitmap_b)
8503 goto out;
8504
8505 vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
8506 if (!vmx_msr_bitmap_legacy)
8507 goto out1;
8508
8509 vmx_msr_bitmap_legacy_x2apic =
8510 (unsigned long *)__get_free_page(GFP_KERNEL);
8511 if (!vmx_msr_bitmap_legacy_x2apic)
8512 goto out2;
8513
8514 vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
8515 if (!vmx_msr_bitmap_longmode)
8516 goto out3;
8517
8518 vmx_msr_bitmap_longmode_x2apic =
8519 (unsigned long *)__get_free_page(GFP_KERNEL);
8520 if (!vmx_msr_bitmap_longmode_x2apic)
8521 goto out4;
8522 vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
8523 if (!vmx_vmread_bitmap)
8524 goto out5;
8525
8526 vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
8527 if (!vmx_vmwrite_bitmap)
8528 goto out6;
8529
8530 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
8531 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
8532
8533 for (i = 0; i < max_shadow_read_write_fields; i++) {
8534 clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap);
8535 clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap);
8536 }
8537
8538 for (i = 0; i < max_shadow_read_only_fields; i++)
8539 clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap);
8540
	/*
	 * Allow direct access to the PC debug port (it is often used for I/O
	 * delays, but the vmexits simply slow things down).
	 */
8545 memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
8546 clear_bit(0x80, vmx_io_bitmap_a);
8547
8548 memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
8549
8550 memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
8551 memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
8552
	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
8554
8555 r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
8556 __alignof__(struct vcpu_vmx), THIS_MODULE);
8557 if (r)
8558 goto out7;
8559
8560#ifdef CONFIG_KEXEC
8561 rcu_assign_pointer(crash_vmclear_loaded_vmcss,
8562 crash_vmclear_local_loaded_vmcss);
8563#endif
8564
8565 vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
8566 vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
8567 vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
8568 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
8569 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
8570 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
8571 memcpy(vmx_msr_bitmap_legacy_x2apic,
8572 vmx_msr_bitmap_legacy, PAGE_SIZE);
8573 memcpy(vmx_msr_bitmap_longmode_x2apic,
8574 vmx_msr_bitmap_longmode, PAGE_SIZE);
8575
8576 if (enable_apicv) {
8577 for (msr = 0x800; msr <= 0x8ff; msr++)
8578 vmx_disable_intercept_msr_read_x2apic(msr);
8579
		/*
		 * According to the SDM, the whole APIC ID register is used in
		 * x2apic mode, but KVM uses only the highest eight bits, so
		 * reads of it must still be intercepted.
		 */
8583 vmx_enable_intercept_msr_read_x2apic(0x802);
		/* TMCCT */
8585 vmx_enable_intercept_msr_read_x2apic(0x839);
		/* TPR */
8587 vmx_disable_intercept_msr_write_x2apic(0x808);
		/* EOI */
8589 vmx_disable_intercept_msr_write_x2apic(0x80b);
		/* SELF-IPI */
8591 vmx_disable_intercept_msr_write_x2apic(0x83f);
8592 }
8593
8594 if (enable_ept) {
8595 kvm_mmu_set_mask_ptes(0ull,
8596 (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
8597 (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
8598 0ull, VMX_EPT_EXECUTABLE_MASK);
8599 ept_set_mmio_spte_mask();
8600 kvm_enable_tdp();
8601 } else
8602 kvm_disable_tdp();
8603
8604 return 0;
8605
8606out7:
8607 free_page((unsigned long)vmx_vmwrite_bitmap);
8608out6:
8609 free_page((unsigned long)vmx_vmread_bitmap);
8610out5:
8611 free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
8612out4:
8613 free_page((unsigned long)vmx_msr_bitmap_longmode);
8614out3:
8615 free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
8616out2:
8617 free_page((unsigned long)vmx_msr_bitmap_legacy);
8618out1:
8619 free_page((unsigned long)vmx_io_bitmap_b);
8620out:
8621 free_page((unsigned long)vmx_io_bitmap_a);
8622 return r;
8623}
8624
8625static void __exit vmx_exit(void)
8626{
8627 free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
8628 free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
8629 free_page((unsigned long)vmx_msr_bitmap_legacy);
8630 free_page((unsigned long)vmx_msr_bitmap_longmode);
8631 free_page((unsigned long)vmx_io_bitmap_b);
8632 free_page((unsigned long)vmx_io_bitmap_a);
8633 free_page((unsigned long)vmx_vmwrite_bitmap);
8634 free_page((unsigned long)vmx_vmread_bitmap);
8635
8636#ifdef CONFIG_KEXEC
8637 rcu_assign_pointer(crash_vmclear_loaded_vmcss, NULL);
8638 synchronize_rcu();
8639#endif
8640
8641 kvm_exit();
8642}
8643
8644module_init(vmx_init)
8645module_exit(vmx_exit)
8646