#include <linux/kvm_host.h>
#include "irq.h"
#include "ioapic.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"
#include "hyperv.h"
#include "lapic.h"
#include "xen.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
#include <linux/entry-kvm.h>
#include <linux/suspend.h>

#include <trace/events/kvm.h>

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mce.h>
#include <asm/pkru.h>
#include <linux/kernel_stat.h>
#include <asm/fpu/internal.h>
#include <asm/pvclock.h>
#include <asm/div64.h>
#include <asm/irq_remapping.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/intel_pt.h>
#include <asm/emulate_prefix.h>
#include <asm/sgx.h>
#include <clocksource/hyperv_timer.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);

#define emul_to_vcpu(ctxt) \
	((struct kvm_vcpu *)(ctxt)->vcpu)

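/*
 * EFER bits a guest is allowed to set: SCE everywhere, plus LME/LMA on
 * 64-bit hosts.  Everything else is treated as reserved unless a vendor
 * module opts bits in via kvm_enable_efer_bits().
 */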
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;

#define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)

#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
				    KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void process_smi(struct kvm_vcpu *vcpu);
static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);

static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);

struct kvm_x86_ops kvm_x86_ops __read_mostly;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

#define KVM_X86_OP(func) \
	DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \
				*(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_NULL KVM_X86_OP
#include <asm/kvm-x86-ops.h>
EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);
EXPORT_STATIC_CALL_GPL(kvm_x86_tlb_flush_current);

static bool __read_mostly ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);

bool __read_mostly report_ignored_msrs = true;
module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
EXPORT_SYMBOL_GPL(report_ignored_msrs);

unsigned int min_timer_period_us = 200;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);

static bool __read_mostly kvmclock_periodic_sync = true;
module_param(kvmclock_periodic_sync, bool, S_IRUGO);

bool __read_mostly kvm_has_tsc_control;
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
u32 __read_mostly kvm_max_guest_tsc_khz;
EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
u8 __read_mostly kvm_tsc_scaling_ratio_frac_bits;
EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);
u64 __read_mostly kvm_max_tsc_scaling_ratio;
EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
u64 __read_mostly kvm_default_tsc_scaling_ratio;
EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);
bool __read_mostly kvm_has_bus_lock_exit;
EXPORT_SYMBOL_GPL(kvm_has_bus_lock_exit);

static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

static int __read_mostly lapic_timer_advance_ns = -1;
module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR);

static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, S_IRUGO);

bool __read_mostly enable_vmware_backdoor = false;
module_param(enable_vmware_backdoor, bool, S_IRUGO);
EXPORT_SYMBOL_GPL(enable_vmware_backdoor);

static bool __read_mostly force_emulation_prefix = false;
module_param(force_emulation_prefix, bool, S_IRUGO);

int __read_mostly pi_inject_timer = -1;
module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);

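/*
 * MSRs whose guest values can safely stay loaded until the CPU returns to
 * userspace: the host values are restored lazily from kvm_on_user_return()
 * via a user-return notifier instead of on every VM-exit.
 */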
#define KVM_MAX_NR_USER_RETURN_MSRS 16

struct kvm_user_return_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_user_return_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_MAX_NR_USER_RETURN_MSRS];
};

u32 __read_mostly kvm_nr_uret_msrs;
EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static struct kvm_user_return_msrs __percpu *user_return_msrs;

#define KVM_SUPPORTED_XCR0	(XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU)

u64 __read_mostly host_efer;
EXPORT_SYMBOL_GPL(host_efer);

bool __read_mostly allow_smaller_maxphyaddr = 0;
EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);

bool __read_mostly enable_apicv = true;
EXPORT_SYMBOL_GPL(enable_apicv);

u64 __read_mostly host_xss;
EXPORT_SYMBOL_GPL(host_xss);
u64 __read_mostly supported_xss;
EXPORT_SYMBOL_GPL(supported_xss);

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
	STATS_DESC_COUNTER(VM, mmu_pte_write),
	STATS_DESC_COUNTER(VM, mmu_pde_zapped),
	STATS_DESC_COUNTER(VM, mmu_flooded),
	STATS_DESC_COUNTER(VM, mmu_recycled),
	STATS_DESC_COUNTER(VM, mmu_cache_miss),
	STATS_DESC_ICOUNTER(VM, mmu_unsync),
	STATS_DESC_ICOUNTER(VM, pages_4k),
	STATS_DESC_ICOUNTER(VM, pages_2m),
	STATS_DESC_ICOUNTER(VM, pages_1g),
	STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
	STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size),
	STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, pf_fixed),
	STATS_DESC_COUNTER(VCPU, pf_guest),
	STATS_DESC_COUNTER(VCPU, tlb_flush),
	STATS_DESC_COUNTER(VCPU, invlpg),
	STATS_DESC_COUNTER(VCPU, exits),
	STATS_DESC_COUNTER(VCPU, io_exits),
	STATS_DESC_COUNTER(VCPU, mmio_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, irq_window_exits),
	STATS_DESC_COUNTER(VCPU, nmi_window_exits),
	STATS_DESC_COUNTER(VCPU, l1d_flush),
	STATS_DESC_COUNTER(VCPU, halt_exits),
	STATS_DESC_COUNTER(VCPU, request_irq_exits),
	STATS_DESC_COUNTER(VCPU, irq_exits),
	STATS_DESC_COUNTER(VCPU, host_state_reload),
	STATS_DESC_COUNTER(VCPU, fpu_reload),
	STATS_DESC_COUNTER(VCPU, insn_emulation),
	STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
	STATS_DESC_COUNTER(VCPU, hypercalls),
	STATS_DESC_COUNTER(VCPU, irq_injections),
	STATS_DESC_COUNTER(VCPU, nmi_injections),
	STATS_DESC_COUNTER(VCPU, req_event),
	STATS_DESC_COUNTER(VCPU, nested_run),
	STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
	STATS_DESC_COUNTER(VCPU, directed_yield_successful),
	STATS_DESC_ICOUNTER(VCPU, guest_mode)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

u64 __read_mostly host_xcr0;
u64 __read_mostly supported_xcr0;
EXPORT_SYMBOL_GPL(supported_xcr0);

static struct kmem_cache *x86_fpu_cache;

static struct kmem_cache *x86_emulator_cache;

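/*
 * Handle an MSR access that KVM does not implement: with ignore_msrs the
 * access is silently dropped (optionally logged), otherwise it is logged
 * (ratelimited) and reported back to the caller as failed.
 */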
static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
{
	const char *op = write ? "wrmsr" : "rdmsr";

	if (ignore_msrs) {
		if (report_ignored_msrs)
			kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
				      op, msr, data);

		return true;
	} else {
		kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
				      op, msr, data);
		return false;
	}
}

static struct kmem_cache *kvm_alloc_emulator_cache(void)
{
	unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src);
	unsigned int size = sizeof(struct x86_emulate_ctxt);

	return kmem_cache_create_usercopy("x86_emulator", size,
					  __alignof__(struct x86_emulate_ctxt),
					  SLAB_ACCOUNT, useroffset,
					  size - useroffset, NULL);
}

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int i;
	for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
		vcpu->arch.apf.gfns[i] = ~0;
}

static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_user_return_msrs *msrs
		= container_of(urn, struct kvm_user_return_msrs, urn);
	struct kvm_user_return_msr_values *values;
	unsigned long flags;

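	/*
	 * Clear ->registered and unregister with IRQs off; kvm_on_user_return()
	 * is also invoked directly from drop_user_return_notifiers(), so the
	 * unregistration must not be interrupted halfway through.
	 */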
	local_irq_save(flags);
	if (msrs->registered) {
		msrs->registered = false;
		user_return_notifier_unregister(urn);
	}
	local_irq_restore(flags);
	for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
		values = &msrs->values[slot];
		if (values->host != values->curr) {
			wrmsrl(kvm_uret_msrs_list[slot], values->host);
			values->curr = values->host;
		}
	}
}

static int kvm_probe_user_return_msr(u32 msr)
{
	u64 val;
	int ret;

	preempt_disable();
	ret = rdmsrl_safe(msr, &val);
	if (ret)
		goto out;
	ret = wrmsrl_safe(msr, val);
out:
	preempt_enable();
	return ret;
}

int kvm_add_user_return_msr(u32 msr)
{
	BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);

	if (kvm_probe_user_return_msr(msr))
		return -1;

	kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
	return kvm_nr_uret_msrs++;
}
EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);

int kvm_find_user_return_msr(u32 msr)
{
	int i;

	for (i = 0; i < kvm_nr_uret_msrs; ++i) {
		if (kvm_uret_msrs_list[i] == msr)
			return i;
	}
	return -1;
}
EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);

static void kvm_user_return_msr_cpu_online(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
	u64 value;
	int i;

	for (i = 0; i < kvm_nr_uret_msrs; ++i) {
		rdmsrl_safe(kvm_uret_msrs_list[i], &value);
		msrs->values[i].host = value;
		msrs->values[i].curr = value;
	}
}

int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
	int err;

	value = (value & mask) | (msrs->values[slot].host & ~mask);
	if (value == msrs->values[slot].curr)
		return 0;
	err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
	if (err)
		return 1;

	msrs->values[slot].curr = value;
	if (!msrs->registered) {
		msrs->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&msrs->urn);
		msrs->registered = true;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);

static void drop_user_return_notifiers(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);

	if (msrs->registered)
		kvm_on_user_return(&msrs->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
{
	return kvm_apic_mode(kvm_get_apic_base(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_apic_mode);

int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
	enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
	u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff |
		(guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);

	if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
		return 1;
	if (!msr_info->host_initiated) {
		if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
			return 1;
		if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
			return 1;
	}

	kvm_lapic_set_base(vcpu, msr_info->data);
	kvm_recalculate_apic_map(vcpu->kvm);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

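/*
 * Faults while executing VMX/SVM instructions are only expected while the
 * hardware is being disabled for reboot (kvm_rebooting); anything else is
 * treated as a bug.
 */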
noinstr void kvm_spurious_fault(void)
{
	BUG_ON(!kvm_rebooting);
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);

#define EXCPT_BENIGN 0
#define EXCPT_CONTRIBUTORY 1
#define EXCPT_PF 2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

#define EXCPT_FAULT 0
#define EXCPT_TRAP 1
#define EXCPT_ABORT 2
#define EXCPT_INTERRUPT 3

static int exception_type(int vector)
{
	unsigned int mask;

	if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
		return EXCPT_INTERRUPT;

	mask = 1 << vector;

	if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
		return EXCPT_TRAP;

	if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
		return EXCPT_ABORT;

	return EXCPT_FAULT;
}

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
{
	unsigned nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (!has_payload)
		return;

	switch (nr) {
	case DB_VECTOR:
		vcpu->arch.dr6 &= ~DR_TRAP_BITS;
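		/*
		 * Merge the #DB payload into DR6 using "active low" semantics:
		 * bits that are part of DR6_ACTIVE_LOW end up cleared when set
		 * in the payload, all other payload bits are set directly.
		 */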
		vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
		vcpu->arch.dr6 |= payload;
		vcpu->arch.dr6 ^= payload & DR6_ACTIVE_LOW;

		vcpu->arch.dr6 &= ~BIT(12);
		break;
	case PF_VECTOR:
		vcpu->arch.cr2 = payload;
		break;
	}

	vcpu->arch.exception.has_payload = false;
	vcpu->arch.exception.payload = 0;
}
EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);

static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
				   unsigned nr, bool has_error, u32 error_code,
				   bool has_payload, unsigned long payload, bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
	queue:
		if (reinject) {
			WARN_ON_ONCE(vcpu->arch.exception.pending);
			vcpu->arch.exception.injected = true;
			if (WARN_ON_ONCE(has_payload)) {
				has_payload = false;
				payload = 0;
			}
		} else {
			vcpu->arch.exception.pending = true;
			vcpu->arch.exception.injected = false;
		}
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.has_payload = has_payload;
		vcpu->arch.exception.payload = payload;
		if (!is_guest_mode(vcpu))
			kvm_deliver_exception_payload(vcpu);
		return;
	}

	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
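		/*
		 * Two contributory exceptions, or a page fault followed by
		 * another non-benign exception, collapse into a #DF with a
		 * zero error code.
		 */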
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.injected = false;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
		vcpu->arch.exception.has_payload = false;
		vcpu->arch.exception.payload = 0;
	} else
		goto queue;
}

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
			   unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_p);

static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
				    u32 error_code, unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, true, error_code,
			       true, payload, false);
}

int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		kvm_inject_gp(vcpu, 0);
	else
		return kvm_skip_emulated_instruction(vcpu);

	return 1;
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.exception.nested_apf =
		is_guest_mode(vcpu) && fault->async_page_fault;
	if (vcpu->arch.exception.nested_apf) {
		vcpu->arch.apf.nested_apf_token = fault->address;
		kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
	} else {
		kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
					fault->address);
	}
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);

bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
				    struct x86_exception *fault)
{
	struct kvm_mmu *fault_mmu;
	WARN_ON_ONCE(fault->vector != PF_VECTOR);

	fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
					       vcpu->arch.walk_mmu;

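	/*
	 * For present, non-reserved-bit faults, drop any stale mapping of the
	 * faulting address in the MMU context that generated the fault before
	 * the exception is injected.
	 */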
	if ((fault->error_code & PFERR_PRESENT_MASK) &&
	    !(fault->error_code & PFERR_RSVD_MASK))
		kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
				       fault_mmu->root_hpa);

	fault_mmu->inject_page_fault(vcpu, fault);
	return fault->nested_page_fault;
}
EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
	if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
		return true;

	kvm_queue_exception(vcpu, UD_VECTOR);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_dr);

int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t ngfn, void *data, int offset, int len,
			    u32 access)
{
	struct x86_exception exception;
	gfn_t real_gfn;
	gpa_t ngpa;

	ngpa = gfn_to_gpa(ngfn);
	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
	if (real_gfn == UNMAPPED_GVA)
		return -EFAULT;

	real_gfn = gpa_to_gfn(real_gfn);

	return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);

static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
}

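/*
 * Load the four PAE page-directory-pointer entries referenced by CR3.
 * Returns 1 if they were read and are valid, 0 if any PDPTE is unreadable
 * or has reserved bits set.
 */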
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
				      offset * sizeof(u64), sizeof(pdpte),
				      PFERR_USER_MASK|PFERR_WRITE_MASK);
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & PT_PRESENT_MASK) &&
		    (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
	kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
	vcpu->arch.pdptrs_from_userspace = false;

out:
	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{
	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
	}

	if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
		kvm_mmu_reset_context(vcpu);

	if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
}
EXPORT_SYMBOL_GPL(kvm_post_set_cr0);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);
	unsigned long pdptr_bits = X86_CR0_CD | X86_CR0_NW | X86_CR0_PG;

	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

#ifdef CONFIG_X86_64
	if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
	    (cr0 & X86_CR0_PG)) {
		int cs_db, cs_l;

		if (!is_pae(vcpu))
			return 1;
		static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
		if (cs_l)
			return 1;
	}
#endif
	if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
	    is_pae(vcpu) && ((cr0 ^ old_cr0) & pdptr_bits) &&
	    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)))
		return 1;

	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
		return 1;

	static_call(kvm_x86_set_cr0)(vcpu, cr0);

	kvm_post_set_cr0(vcpu, old_cr0, cr0);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return;

	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {

		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);

		if (vcpu->arch.xsaves_enabled &&
		    vcpu->arch.ia32_xss != host_xss)
			wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
	}

	if (static_cpu_has(X86_FEATURE_PKU) &&
	    (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
	     (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) &&
	    vcpu->arch.pkru != vcpu->arch.host_pkru)
		write_pkru(vcpu->arch.pkru);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);

void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return;

	if (static_cpu_has(X86_FEATURE_PKU) &&
	    (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
	     (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) {
		vcpu->arch.pkru = rdpkru();
		if (vcpu->arch.pkru != vcpu->arch.host_pkru)
			write_pkru(vcpu->arch.host_pkru);
	}

	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {

		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);

		if (vcpu->arch.xsaves_enabled &&
		    vcpu->arch.ia32_xss != host_xss)
			wrmsrl(MSR_IA32_XSS, host_xss);
	}
}
EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);

static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0 = xcr;
	u64 old_xcr0 = vcpu->arch.xcr0;
	u64 valid_bits;

	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	if (!(xcr0 & XFEATURE_MASK_FP))
		return 1;
	if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
		return 1;

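	/*
	 * Only features exposed to the guest via CPUID may be enabled in
	 * XCR0; x87 state (XFEATURE_MASK_FP) is always permitted.
	 */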
	valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
	if (xcr0 & ~valid_bits)
		return 1;

	if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
	    (!(xcr0 & XFEATURE_MASK_BNDCSR)))
		return 1;

	if (xcr0 & XFEATURE_MASK_AVX512) {
		if (!(xcr0 & XFEATURE_MASK_YMM))
			return 1;
		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
			return 1;
	}
	vcpu->arch.xcr0 = xcr0;

	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
		kvm_update_cpuid_runtime(vcpu);
	return 0;
}

int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
{
	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
	    __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);

bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & cr4_reserved_bits)
		return false;

	if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
		return false;

	return static_call(kvm_x86_is_valid_cr4)(vcpu, cr4);
}
EXPORT_SYMBOL_GPL(kvm_is_valid_cr4);

void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
{
	if (((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS) ||
	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
		kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_post_set_cr4);

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
				   X86_CR4_SMEP;

	if (!kvm_is_valid_cr4(vcpu, cr4))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
		if ((cr4 ^ old_cr4) & X86_CR4_LA57)
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
				   kvm_read_cr3(vcpu)))
		return 1;

	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
			return 1;

		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
			return 1;
	}

	static_call(kvm_x86_set_cr4)(vcpu, cr4);

	kvm_post_set_cr4(vcpu, old_cr4, cr4);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	unsigned long roots_to_free = 0;
	int i;

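	/*
	 * If the PCID being invalidated is currently active, sync and flush
	 * the current MMU context; cached previous roots that used this PCID
	 * are simply freed.
	 */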
	if (kvm_get_active_pcid(vcpu) == pcid) {
		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid)
			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);

	kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
}

int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	bool skip_tlb_flush = false;
	unsigned long pcid = 0;
#ifdef CONFIG_X86_64
	bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);

	if (pcid_enabled) {
		skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
		cr3 &= ~X86_CR3_PCID_NOFLUSH;
		pcid = cr3 & X86_CR3_PCID_MASK;
	}
#endif

	if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu))
		goto handle_tlb_flush;

	if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
		return 1;

	if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
		return 1;

	if (cr3 != kvm_read_cr3(vcpu))
		kvm_mmu_new_pgd(vcpu, cr3);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

handle_tlb_flush:
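	/*
	 * A CR3 write without the PCID_NOFLUSH hint invalidates the targeted
	 * PCID (which covers the whole current context when PCIDs are
	 * disabled, since pcid is then 0).
	 */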
	if (!skip_tlb_flush)
		kvm_invalidate_pcid(vcpu, pcid);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (lapic_in_kernel(vcpu))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
{
	int i;

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
	}
}

void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
	unsigned long dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		dr7 = vcpu->arch.guest_debug_dr7;
	else
		dr7 = vcpu->arch.dr7;
	static_call(kvm_x86_set_dr7)(vcpu, dr7);
	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
	if (dr7 & DR7_BP_EN_MASK)
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
}
EXPORT_SYMBOL_GPL(kvm_update_dr7);

static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{
	u64 fixed = DR6_FIXED_1;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
		fixed |= DR6_RTM;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
		fixed |= DR6_BUS_LOCK;
	return fixed;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	size_t size = ARRAY_SIZE(vcpu->arch.db);

	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[array_index_nospec(dr, size)] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
	case 6:
		if (!kvm_dr6_valid(val))
			return 1;
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
		break;
	case 5:
	default:
		if (!kvm_dr7_valid(val))
			return 1;
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		kvm_update_dr7(vcpu);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	size_t size = ARRAY_SIZE(vcpu->arch.db);

	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[array_index_nospec(dr, size)];
		break;
	case 4:
	case 6:
		*val = vcpu->arch.dr6;
		break;
	case 5:
	default:
		*val = vcpu->arch.dr7;
		break;
	}
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_rcx_read(vcpu);
	u64 data;

	if (kvm_pmu_rdpmc(vcpu, ecx, &data)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	kvm_rax_write(vcpu, (u32)data);
	kvm_rdx_write(vcpu, data >> 32);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);

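/*
 * MSRs exchanged with userspace via KVM_GET/SET_MSRS.  The full list below
 * is filtered into msrs_to_save[] at init time according to what the host
 * actually supports.
 */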
static const u32 msrs_to_save_all[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
	MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
	MSR_IA32_SPEC_CTRL,
	MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
	MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
	MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
	MSR_IA32_UMWAIT_CONTROL,

	MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
	MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3,
	MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
	MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
	MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
	MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
	MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
	MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
	MSR_ARCH_PERFMON_PERFCTR0 + 8, MSR_ARCH_PERFMON_PERFCTR0 + 9,
	MSR_ARCH_PERFMON_PERFCTR0 + 10, MSR_ARCH_PERFMON_PERFCTR0 + 11,
	MSR_ARCH_PERFMON_PERFCTR0 + 12, MSR_ARCH_PERFMON_PERFCTR0 + 13,
	MSR_ARCH_PERFMON_PERFCTR0 + 14, MSR_ARCH_PERFMON_PERFCTR0 + 15,
	MSR_ARCH_PERFMON_PERFCTR0 + 16, MSR_ARCH_PERFMON_PERFCTR0 + 17,
	MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
	MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
	MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
	MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
	MSR_ARCH_PERFMON_EVENTSEL0 + 8, MSR_ARCH_PERFMON_EVENTSEL0 + 9,
	MSR_ARCH_PERFMON_EVENTSEL0 + 10, MSR_ARCH_PERFMON_EVENTSEL0 + 11,
	MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
	MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
	MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,

	MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
	MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
	MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
	MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
	MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
	MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
};

static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
static unsigned num_msrs_to_save;

static const u32 emulated_msrs_all[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
	HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
	HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
	HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
	HV_X64_MSR_RESET,
	HV_X64_MSR_VP_INDEX,
	HV_X64_MSR_VP_RUNTIME,
	HV_X64_MSR_SCONTROL,
	HV_X64_MSR_STIMER0_CONFIG,
	HV_X64_MSR_VP_ASSIST_PAGE,
	HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
	HV_X64_MSR_TSC_EMULATION_STATUS,
	HV_X64_MSR_SYNDBG_OPTIONS,
	HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
	HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
	HV_X64_MSR_SYNDBG_PENDING_BUFFER,

	MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
	MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,

	MSR_IA32_TSC_ADJUST,
	MSR_IA32_TSC_DEADLINE,
	MSR_IA32_ARCH_CAPABILITIES,
	MSR_IA32_PERF_CAPABILITIES,
	MSR_IA32_MISC_ENABLE,
	MSR_IA32_MCG_STATUS,
	MSR_IA32_MCG_CTL,
	MSR_IA32_MCG_EXT_CTL,
	MSR_IA32_SMBASE,
	MSR_SMI_COUNT,
	MSR_PLATFORM_INFO,
	MSR_MISC_FEATURES_ENABLES,
	MSR_AMD64_VIRT_SPEC_CTRL,
	MSR_IA32_POWER_CTL,
	MSR_IA32_UCODE_REV,

	MSR_IA32_VMX_BASIC,
	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
	MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
	MSR_IA32_VMX_TRUE_EXIT_CTLS,
	MSR_IA32_VMX_TRUE_ENTRY_CTLS,
	MSR_IA32_VMX_MISC,
	MSR_IA32_VMX_CR0_FIXED0,
	MSR_IA32_VMX_CR4_FIXED0,
	MSR_IA32_VMX_VMCS_ENUM,
	MSR_IA32_VMX_PROCBASED_CTLS2,
	MSR_IA32_VMX_EPT_VPID_CAP,
	MSR_IA32_VMX_VMFUNC,

	MSR_K7_HWCR,
	MSR_KVM_POLL_CONTROL,
};

static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
static unsigned num_emulated_msrs;

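/*
 * Read-only "feature" MSRs that enumerate host/VMX capabilities; their
 * values come from kvm_get_msr_feature() rather than from guest state.
 */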
static const u32 msr_based_features_all[] = {
	MSR_IA32_VMX_BASIC,
	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
	MSR_IA32_VMX_PINBASED_CTLS,
	MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
	MSR_IA32_VMX_PROCBASED_CTLS,
	MSR_IA32_VMX_TRUE_EXIT_CTLS,
	MSR_IA32_VMX_EXIT_CTLS,
	MSR_IA32_VMX_TRUE_ENTRY_CTLS,
	MSR_IA32_VMX_ENTRY_CTLS,
	MSR_IA32_VMX_MISC,
	MSR_IA32_VMX_CR0_FIXED0,
	MSR_IA32_VMX_CR0_FIXED1,
	MSR_IA32_VMX_CR4_FIXED0,
	MSR_IA32_VMX_CR4_FIXED1,
	MSR_IA32_VMX_VMCS_ENUM,
	MSR_IA32_VMX_PROCBASED_CTLS2,
	MSR_IA32_VMX_EPT_VPID_CAP,
	MSR_IA32_VMX_VMFUNC,

	MSR_F10H_DECFG,
	MSR_IA32_UCODE_REV,
	MSR_IA32_ARCH_CAPABILITIES,
	MSR_IA32_PERF_CAPABILITIES,
};

static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)];
static unsigned int num_msr_based_features;

static u64 kvm_get_arch_capabilities(void)
{
	u64 data = 0;

	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);

	data |= ARCH_CAP_PSCHANGE_MC_NO;

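	/*
	 * If the host is flushing the L1D cache on VM-entry itself, advertise
	 * that the guest may skip its own flush.
	 */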
	if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
		data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;

	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		data |= ARCH_CAP_RDCL_NO;
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		data |= ARCH_CAP_SSB_NO;
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		data |= ARCH_CAP_MDS_NO;

	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		data &= ~ARCH_CAP_TAA_NO;
	} else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		data |= ARCH_CAP_TAA_NO;
	} else {
	}

	return data;
}

static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
{
	switch (msr->index) {
	case MSR_IA32_ARCH_CAPABILITIES:
		msr->data = kvm_get_arch_capabilities();
		break;
	case MSR_IA32_UCODE_REV:
		rdmsrl_safe(msr->index, &msr->data);
		break;
	default:
		return static_call(kvm_x86_get_msr_feature)(msr);
	}
	return 0;
}

static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct kvm_msr_entry msr;
	int r;

	msr.index = index;
	r = kvm_get_msr_feature(&msr);

	if (r == KVM_MSR_RET_INVALID) {
		*data = 0;
		if (kvm_msr_ignored_check(index, 0, false))
			r = 0;
	}

	if (r)
		return r;

	*data = msr.data;

	return 0;
}

static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
		return false;

	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
		return false;

	if (efer & (EFER_LME | EFER_LMA) &&
	    !guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return false;

	if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))
		return false;

	return true;
}

bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits)
		return false;

	return __kvm_valid_efer(vcpu, efer);
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);

static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	u64 old_efer = vcpu->arch.efer;
	u64 efer = msr_info->data;
	int r;

	if (efer & efer_reserved_bits)
		return 1;

	if (!msr_info->host_initiated) {
		if (!__kvm_valid_efer(vcpu, efer))
			return 1;

		if (is_paging(vcpu) &&
		    (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
			return 1;
	}

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	r = static_call(kvm_x86_set_efer)(vcpu, efer);
	if (r) {
		WARN_ON(r > 0);
		return r;
	}

	if ((efer ^ old_efer) & EFER_NX)
		kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
{
	struct kvm_x86_msr_filter *msr_filter;
	struct msr_bitmap_range *ranges;
	struct kvm *kvm = vcpu->kvm;
	bool allowed;
	int idx;
	u32 i;

	if (index >= 0x800 && index <= 0x8ff)
		return true;

	idx = srcu_read_lock(&kvm->srcu);

	msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
	if (!msr_filter) {
		allowed = true;
		goto out;
	}

	allowed = msr_filter->default_allow;
	ranges = msr_filter->ranges;

	for (i = 0; i < msr_filter->count; i++) {
		u32 start = ranges[i].base;
		u32 end = start + ranges[i].nmsrs;
		u32 flags = ranges[i].flags;
		unsigned long *bitmap = ranges[i].bitmap;

		if ((index >= start) && (index < end) && (flags & type)) {
			allowed = !!test_bit(index - start, bitmap);
			break;
		}
	}

out:
	srcu_read_unlock(&kvm->srcu, idx);

	return allowed;
}
EXPORT_SYMBOL_GPL(kvm_msr_allowed);

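/*
 * Write @data to the MSR @index on behalf of the guest or the host (ioctl).
 * MSR-filter checks and the sanity checks that are common to VMX and SVM
 * live here; everything else is handled by the vendor's .set_msr hook.
 */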
static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
			 bool host_initiated)
{
	struct msr_data msr;

	if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
		return KVM_MSR_RET_FILTERED;

	switch (index) {
	case MSR_FS_BASE:
	case MSR_GS_BASE:
	case MSR_KERNEL_GS_BASE:
	case MSR_CSTAR:
	case MSR_LSTAR:
		if (is_noncanonical_address(data, vcpu))
			return 1;
		break;
	case MSR_IA32_SYSENTER_EIP:
	case MSR_IA32_SYSENTER_ESP:
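		/*
		 * Canonicalize the value instead of rejecting the write, so
		 * the result is a deterministic address no matter how the
		 * underlying CPU treats non-canonical SYSENTER MSR values.
		 */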
		data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
		break;
	case MSR_TSC_AUX:
		if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
			return 1;

		if (!host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
			return 1;

		if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0)
			return 1;

		data = (u32)data;
		break;
	}

	msr.data = data;
	msr.index = index;
	msr.host_initiated = host_initiated;

	return static_call(kvm_x86_set_msr)(vcpu, &msr);
}

static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
				     u32 index, u64 data, bool host_initiated)
{
	int ret = __kvm_set_msr(vcpu, index, data, host_initiated);

	if (ret == KVM_MSR_RET_INVALID)
		if (kvm_msr_ignored_check(index, data, true))
			ret = 0;

	return ret;
}

int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
		  bool host_initiated)
{
	struct msr_data msr;
	int ret;

	if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
		return KVM_MSR_RET_FILTERED;

	switch (index) {
	case MSR_TSC_AUX:
		if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
			return 1;

		if (!host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
			return 1;
		break;
	}

	msr.index = index;
	msr.host_initiated = host_initiated;

	ret = static_call(kvm_x86_get_msr)(vcpu, &msr);
	if (!ret)
		*data = msr.data;
	return ret;
}

static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
				     u32 index, u64 *data, bool host_initiated)
{
	int ret = __kvm_get_msr(vcpu, index, data, host_initiated);

	if (ret == KVM_MSR_RET_INVALID) {
		*data = 0;
		if (kvm_msr_ignored_check(index, 0, false))
			ret = 0;
	}

	return ret;
}

int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
	return kvm_get_msr_ignored_check(vcpu, index, data, false);
}
EXPORT_SYMBOL_GPL(kvm_get_msr);

int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
	return kvm_set_msr_ignored_check(vcpu, index, data, false);
}
EXPORT_SYMBOL_GPL(kvm_set_msr);

static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
{
	int err = vcpu->run->msr.error;
	if (!err) {
		kvm_rax_write(vcpu, (u32)vcpu->run->msr.data);
		kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32);
	}

	return static_call(kvm_x86_complete_emulated_msr)(vcpu, err);
}

static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu)
{
	return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error);
}

static u64 kvm_msr_reason(int r)
{
	switch (r) {
	case KVM_MSR_RET_INVALID:
		return KVM_MSR_EXIT_REASON_UNKNOWN;
	case KVM_MSR_RET_FILTERED:
		return KVM_MSR_EXIT_REASON_FILTER;
	default:
		return KVM_MSR_EXIT_REASON_INVAL;
	}
}

static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index,
			      u32 exit_reason, u64 data,
			      int (*completion)(struct kvm_vcpu *vcpu),
			      int r)
{
	u64 msr_reason = kvm_msr_reason(r);

	if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason))
		return 0;

	vcpu->run->exit_reason = exit_reason;
	vcpu->run->msr.error = 0;
	memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad));
	vcpu->run->msr.reason = msr_reason;
	vcpu->run->msr.index = index;
	vcpu->run->msr.data = data;
	vcpu->arch.complete_userspace_io = completion;

	return 1;
}

static int kvm_get_msr_user_space(struct kvm_vcpu *vcpu, u32 index, int r)
{
	return kvm_msr_user_space(vcpu, index, KVM_EXIT_X86_RDMSR, 0,
				  complete_emulated_rdmsr, r);
}

static int kvm_set_msr_user_space(struct kvm_vcpu *vcpu, u32 index, u64 data, int r)
{
	return kvm_msr_user_space(vcpu, index, KVM_EXIT_X86_WRMSR, data,
				  complete_emulated_wrmsr, r);
}

int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_rcx_read(vcpu);
	u64 data;
	int r;

	r = kvm_get_msr(vcpu, ecx, &data);

	if (r && kvm_get_msr_user_space(vcpu, ecx, r)) {
		return 0;
	}

	if (!r) {
		trace_kvm_msr_read(ecx, data);

		kvm_rax_write(vcpu, data & -1u);
		kvm_rdx_write(vcpu, (data >> 32) & -1u);
	} else {
		trace_kvm_msr_read_ex(ecx);
	}

	return static_call(kvm_x86_complete_emulated_msr)(vcpu, r);
}
EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr);

int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_rcx_read(vcpu);
	u64 data = kvm_read_edx_eax(vcpu);
	int r;

	r = kvm_set_msr(vcpu, ecx, data);

	if (r && kvm_set_msr_user_space(vcpu, ecx, data, r))
		return 0;

	if (r < 0)
		return r;

	if (!r)
		trace_kvm_msr_write(ecx, data);
	else
		trace_kvm_msr_write_ex(ecx, data);

	return static_call(kvm_x86_complete_emulated_msr)(vcpu, r);
}
EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);

int kvm_emulate_as_nop(struct kvm_vcpu *vcpu)
{
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_as_nop);

int kvm_emulate_invd(struct kvm_vcpu *vcpu)
{
	return kvm_emulate_as_nop(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_invd);

int kvm_emulate_mwait(struct kvm_vcpu *vcpu)
{
	pr_warn_once("kvm: MWAIT instruction emulated as NOP!\n");
	return kvm_emulate_as_nop(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_mwait);

int kvm_handle_invalid_op(struct kvm_vcpu *vcpu)
{
	kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;
}
EXPORT_SYMBOL_GPL(kvm_handle_invalid_op);

int kvm_emulate_monitor(struct kvm_vcpu *vcpu)
{
	pr_warn_once("kvm: MONITOR instruction emulated as NOP!\n");
	return kvm_emulate_as_nop(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_monitor);

static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
{
	xfer_to_guest_mode_prepare();
	return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
		xfer_to_guest_mode_work_pending();
}

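/*
 * Fast path for ICR writes in x2APIC mode, called from the WRMSR exit with
 * interrupts still disabled: only fixed-vector, physical-destination IPIs
 * without shorthand or broadcast are handled here.
 */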
static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
{
	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic))
		return 1;

	if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) &&
	    ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
	    ((data & APIC_MODE_MASK) == APIC_DM_FIXED) &&
	    ((u32)(data >> 32) != X2APIC_BROADCAST)) {

		data &= ~(1 << 12);
		kvm_apic_send_ipi(vcpu->arch.apic, (u32)data, (u32)(data >> 32));
		kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR2, (u32)(data >> 32));
		kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR, (u32)data);
		trace_kvm_apic_write(APIC_ICR, (u32)data);
		return 0;
	}

	return 1;
}

static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
{
	if (!kvm_can_use_hv_timer(vcpu))
		return 1;

	kvm_set_lapic_tscdeadline_msr(vcpu, data);
	return 0;
}

fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
{
	u32 msr = kvm_rcx_read(vcpu);
	u64 data;
	fastpath_t ret = EXIT_FASTPATH_NONE;

	switch (msr) {
	case APIC_BASE_MSR + (APIC_ICR >> 4):
		data = kvm_read_edx_eax(vcpu);
		if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) {
			kvm_skip_emulated_instruction(vcpu);
			ret = EXIT_FASTPATH_EXIT_HANDLED;
		}
		break;
	case MSR_IA32_TSC_DEADLINE:
		data = kvm_read_edx_eax(vcpu);
		if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
			kvm_skip_emulated_instruction(vcpu);
			ret = EXIT_FASTPATH_REENTER_GUEST;
		}
		break;
	default:
		break;
	}

	if (ret != EXIT_FASTPATH_NONE)
		trace_kvm_msr_write(msr, data);

	return ret;
}
EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);

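/*
 * Adapters that perform host-initiated MSR accesses with the common
 * (index, data pointer) signature used by the MSR ioctl plumbing.
 */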
static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_get_msr_ignored_check(vcpu, index, data, true);
}

static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr_ignored_check(vcpu, index, *data, true);
}

#ifdef CONFIG_X86_64
struct pvclock_clock {
	int vclock_mode;
	u64 cycle_last;
	u64 mask;
	u32 mult;
	u32 shift;
	u64 base_cycles;
	u64 offset;
};

struct pvclock_gtod_data {
	seqcount_t seq;

	struct pvclock_clock clock;
	struct pvclock_clock raw_clock;

	ktime_t offs_boot;
	u64 wall_time_sec;
};

static struct pvclock_gtod_data pvclock_gtod_data;

static void update_pvclock_gtod(struct timekeeper *tk)
{
	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;

	write_seqcount_begin(&vdata->seq);

	vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode;
	vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
	vdata->clock.mask = tk->tkr_mono.mask;
	vdata->clock.mult = tk->tkr_mono.mult;
	vdata->clock.shift = tk->tkr_mono.shift;
	vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec;
	vdata->clock.offset = tk->tkr_mono.base;

	vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode;
	vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last;
	vdata->raw_clock.mask = tk->tkr_raw.mask;
	vdata->raw_clock.mult = tk->tkr_raw.mult;
	vdata->raw_clock.shift = tk->tkr_raw.shift;
	vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec;
	vdata->raw_clock.offset = tk->tkr_raw.base;

	vdata->wall_time_sec = tk->xtime_sec;

	vdata->offs_boot = tk->offs_boot;

	write_seqcount_end(&vdata->seq);
}

static s64 get_kvmclock_base_ns(void)
{
	return ktime_to_ns(ktime_add(ktime_get_raw(), pvclock_gtod_data.offs_boot));
}
#else
static s64 get_kvmclock_base_ns(void)
{
	return ktime_get_boottime_ns();
}
#endif

void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs)
{
	int version;
	int r;
	struct pvclock_wall_clock wc;
	u32 wc_sec_hi;
	u64 wall_nsec;

	if (!wall_clock)
		return;

	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
	if (r)
		return;

	if (version & 1)
		++version;

	++version;

	if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
		return;

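	/*
	 * The guest computes wall-clock time as this value plus its own
	 * kvmclock reading, so subtract the current kvmclock time from the
	 * host's real time.
	 */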
	wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);

	wc.nsec = do_div(wall_nsec, 1000000000);
	wc.sec = (u32)wall_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	if (sec_hi_ofs) {
		wc_sec_hi = wall_nsec >> 32;
		kvm_write_guest(kvm, wall_clock + sec_hi_ofs,
				&wc_sec_hi, sizeof(wc_sec_hi));
	}

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
				  bool old_msr, bool host_initiated)
{
	struct kvm_arch *ka = &vcpu->kvm->arch;

	if (vcpu->vcpu_id == 0 && !host_initiated) {
		if (ka->boot_vcpu_runs_old_kvmclock != old_msr)
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);

		ka->boot_vcpu_runs_old_kvmclock = old_msr;
	}

	vcpu->arch.time = system_time;
	kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);

	vcpu->arch.pv_time_enabled = false;
	if (!(system_time & 1))
		return;

	if (!kvm_gfn_to_hva_cache_init(vcpu->kvm,
				       &vcpu->arch.pv_time, system_time & ~1ULL,
				       sizeof(struct pvclock_vcpu_time_info)))
		vcpu->arch.pv_time_enabled = true;

	return;
}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	do_shl32_div32(dividend, divisor);
	return dividend;
}

static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
			       s8 *pshift, u32 *pmultiplier)
{
	uint64_t scaled64;
	int32_t shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = base_hz;
	scaled64 = scaled_hz;
	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
			scaled64 >>= 1;
		else
			tps32 <<= 1;
		shift++;
	}

	*pshift = shift;
	*pmultiplier = div_frac(scaled64, tps32);
}

#ifdef CONFIG_X86_64
static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
#endif

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
static unsigned long max_tsc_khz;
2215
2216static u32 adjust_tsc_khz(u32 khz, s32 ppm)
2217{
2218 u64 v = (u64)khz * (1000000 + ppm);
2219 do_div(v, 1000000);
2220 return v;
2221}
2222
2223static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);
2224
2225static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
2226{
2227 u64 ratio;
2228
2229
2230 if (!scale) {
2231 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_default_tsc_scaling_ratio);
2232 return 0;
2233 }
2234
2235
2236 if (!kvm_has_tsc_control) {
2237 if (user_tsc_khz > tsc_khz) {
2238 vcpu->arch.tsc_catchup = 1;
2239 vcpu->arch.tsc_always_catchup = 1;
2240 return 0;
2241 } else {
2242 pr_warn_ratelimited("user requested TSC rate %u falls below hardware speed\n", user_tsc_khz);
2243 return -1;
2244 }
2245 }
2246
2247
2248 ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits,
2249 user_tsc_khz, tsc_khz);
2250
2251 if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
2252 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
2253 user_tsc_khz);
2254 return -1;
2255 }
2256
2257 kvm_vcpu_write_tsc_multiplier(vcpu, ratio);
2258 return 0;
2259}
2260
2261static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
2262{
2263 u32 thresh_lo, thresh_hi;
2264 int use_scaling = 0;
2265
2266
2267 if (user_tsc_khz == 0) {
2268
2269 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_default_tsc_scaling_ratio);
2270 return -1;
2271 }
2272
2273
2274 kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
2275 &vcpu->arch.virtual_tsc_shift,
2276 &vcpu->arch.virtual_tsc_mult);
2277 vcpu->arch.virtual_tsc_khz = user_tsc_khz;
2278
2279
2280
2281
2282
2283
2284
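/*
 * Added note: compare the requested rate against the host TSC rate,
 * allowing tsc_tolerance_ppm of drift; only fall back to TSC scaling
 * (or catchup) when the request lies outside that tolerance.
 */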
2285 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
2286 thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
2287 if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
2288 pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi);
2289 use_scaling = 1;
2290 }
2291 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
2292}
2293
2294static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
2295{
2296 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
2297 vcpu->arch.virtual_tsc_mult,
2298 vcpu->arch.virtual_tsc_shift);
2299 tsc += vcpu->arch.this_tsc_write;
2300 return tsc;
2301}
2302
2303static inline int gtod_is_based_on_tsc(int mode)
2304{
2305 return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
2306}
2307
2308static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
2309{
2310#ifdef CONFIG_X86_64
2311 bool vcpus_matched;
2312 struct kvm_arch *ka = &vcpu->kvm->arch;
2313 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2314
2315 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
2316 atomic_read(&vcpu->kvm->online_vcpus));
2317
2318
2319
2320
2321
2322
2323
2324
2325
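/*
 * Added note: once the master clock is in use, always request an update.
 * Otherwise request one only when the host clocksource is TSC based and
 * all vCPUs now have matching TSC writes, the condition for enabling it.
 */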
2326 if (ka->use_master_clock ||
2327 (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched))
2328 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
2329
2330 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
2331 atomic_read(&vcpu->kvm->online_vcpus),
2332 ka->use_master_clock, gtod->clock.vclock_mode);
2333#endif
2334}
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
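/*
 * Added note: __scale_tsc() multiplies the TSC by a fixed-point ratio;
 * the low kvm_tsc_scaling_ratio_frac_bits bits of 'ratio' are the
 * fractional part, the remaining high bits the integral part.
 */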
2346static inline u64 __scale_tsc(u64 ratio, u64 tsc)
2347{
2348 return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
2349}
2350
2351u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio)
2352{
2353 u64 _tsc = tsc;
2354
2355 if (ratio != kvm_default_tsc_scaling_ratio)
2356 _tsc = __scale_tsc(ratio, tsc);
2357
2358 return _tsc;
2359}
2360EXPORT_SYMBOL_GPL(kvm_scale_tsc);
2361
2362static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
2363{
2364 u64 tsc;
2365
2366 tsc = kvm_scale_tsc(vcpu, rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
2367
2368 return target_tsc - tsc;
2369}
2370
2371u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
2372{
2373 return vcpu->arch.l1_tsc_offset +
2374 kvm_scale_tsc(vcpu, host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
2375}
2376EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
2377
2378u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
2379{
2380 u64 nested_offset;
2381
2382 if (l2_multiplier == kvm_default_tsc_scaling_ratio)
2383 nested_offset = l1_offset;
2384 else
2385 nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier,
2386 kvm_tsc_scaling_ratio_frac_bits);
2387
2388 nested_offset += l2_offset;
2389 return nested_offset;
2390}
2391EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset);
2392
2393u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
2394{
2395 if (l2_multiplier != kvm_default_tsc_scaling_ratio)
2396 return mul_u64_u64_shr(l1_multiplier, l2_multiplier,
2397 kvm_tsc_scaling_ratio_frac_bits);
2398
2399 return l1_multiplier;
2400}
2401EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
2402
2403static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
2404{
2405 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2406 vcpu->arch.l1_tsc_offset,
2407 l1_offset);
2408
2409 vcpu->arch.l1_tsc_offset = l1_offset;
2410
2411
2412
2413
2414
2415
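/*
 * Added note: while L2 is active the hardware offset must combine L1's
 * offset with L2's offset and multiplier; otherwise L1's offset is
 * written to hardware directly.
 */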
2416 if (is_guest_mode(vcpu))
2417 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
2418 l1_offset,
2419 static_call(kvm_x86_get_l2_tsc_offset)(vcpu),
2420 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
2421 else
2422 vcpu->arch.tsc_offset = l1_offset;
2423
2424 static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset);
2425}
2426
2427static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
2428{
2429 vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier;
2430
2431
2432 if (is_guest_mode(vcpu))
2433 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
2434 l1_multiplier,
2435 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
2436 else
2437 vcpu->arch.tsc_scaling_ratio = l1_multiplier;
2438
2439 if (kvm_has_tsc_control)
2440 static_call(kvm_x86_write_tsc_multiplier)(
2441 vcpu, vcpu->arch.tsc_scaling_ratio);
2442}
2443
2444static inline bool kvm_check_tsc_unstable(void)
2445{
2446#ifdef CONFIG_X86_64
2447
2448
2449
2450
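/*
 * Added note: when running on Hyper-V with the TSC page clocksource, the
 * TSC is usable for kvmclock even if the kernel marked it unstable.
 */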
2451 if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK)
2452 return false;
2453#endif
2454 return check_tsc_unstable();
2455}
2456
2457static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
2458{
2459 struct kvm *kvm = vcpu->kvm;
2460 u64 offset, ns, elapsed;
2461 unsigned long flags;
2462 bool matched;
2463 bool already_matched;
2464 bool synchronizing = false;
2465
2466 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
2467 offset = kvm_compute_l1_tsc_offset(vcpu, data);
2468 ns = get_kvmclock_base_ns();
2469 elapsed = ns - kvm->arch.last_tsc_nsec;
2470
2471 if (vcpu->arch.virtual_tsc_khz) {
2472 if (data == 0) {
2473
2474
2475
2476
2477
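/*
 * Added note: a write of zero is treated as vCPU initialization and is
 * always considered a synchronization attempt, which helps keep kvmclock
 * stable across CPU hotplug.
 */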
2478 synchronizing = true;
2479 } else {
2480 u64 tsc_exp = kvm->arch.last_tsc_write +
2481 nsec_to_cycles(vcpu, elapsed);
2482 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
2483
2484
2485
2486
2487
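/*
 * Added note: otherwise the write only counts as synchronizing if it
 * lands within one second (in guest TSC cycles) of the value
 * extrapolated from the previous sync point.
 */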
2488 synchronizing = data < tsc_exp + tsc_hz &&
2489 data + tsc_hz > tsc_exp;
2490 }
2491 }
2492
2493
2494
2495
2496
2497
2498
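/*
 * Added note: within the window above, and with a matching frequency, a
 * stable host reuses the current generation's offset so all vCPUs match
 * exactly; an unstable host compensates for the elapsed time instead.
 * Anything else starts a new TSC generation below.
 */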
2499 if (synchronizing &&
2500 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
2501 if (!kvm_check_tsc_unstable()) {
2502 offset = kvm->arch.cur_tsc_offset;
2503 } else {
2504 u64 delta = nsec_to_cycles(vcpu, elapsed);
2505 data += delta;
2506 offset = kvm_compute_l1_tsc_offset(vcpu, data);
2507 }
2508 matched = true;
2509 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
2510 } else {
2511
2512
2513
2514
2515
2516
2517
2518
2519
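/*
 * Added note: start a new generation of TSC writes, recording the
 * measured time, value and offset so later matching writes can reuse
 * them (see the cur_tsc_* fields and compute_guest_tsc()).
 */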
2520 kvm->arch.cur_tsc_generation++;
2521 kvm->arch.cur_tsc_nsec = ns;
2522 kvm->arch.cur_tsc_write = data;
2523 kvm->arch.cur_tsc_offset = offset;
2524 matched = false;
2525 }
2526
2527
2528
2529
2530
2531 kvm->arch.last_tsc_nsec = ns;
2532 kvm->arch.last_tsc_write = data;
2533 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
2534
2535 vcpu->arch.last_guest_tsc = data;
2536
2537
2538 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
2539 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
2540 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
2541
2542 kvm_vcpu_write_tsc_offset(vcpu, offset);
2543 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
2544
2545 raw_spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
2546 if (!matched) {
2547 kvm->arch.nr_vcpus_matched_tsc = 0;
2548 } else if (!already_matched) {
2549 kvm->arch.nr_vcpus_matched_tsc++;
2550 }
2551
2552 kvm_track_tsc_matching(vcpu);
2553 raw_spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
2554}
2555
2556static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
2557 s64 adjustment)
2558{
2559 u64 tsc_offset = vcpu->arch.l1_tsc_offset;
2560 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
2561}
2562
2563static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
2564{
2565 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
2566 WARN_ON(adjustment < 0);
2567 adjustment = kvm_scale_tsc(vcpu, (u64) adjustment,
2568 vcpu->arch.l1_tsc_scaling_ratio);
2569 adjust_tsc_offset_guest(vcpu, adjustment);
2570}
2571
2572#ifdef CONFIG_X86_64
2573
2574static u64 read_tsc(void)
2575{
2576 u64 ret = (u64)rdtsc_ordered();
2577 u64 last = pvclock_gtod_data.clock.cycle_last;
2578
2579 if (likely(ret >= last))
2580 return ret;
2581
2582
2583
2584
2585
2586
2587
2588
2589
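/*
 * Added note: rdtsc_ordered() returned a value behind cycle_last, which
 * can happen across CPUs with slightly offset TSCs, so clamp to the last
 * known-good value. The empty asm forces a real branch instead of a cmov.
 */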
2590 asm volatile ("");
2591 return last;
2592}
2593
2594static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
2595 int *mode)
2596{
2597 long v;
2598 u64 tsc_pg_val;
2599
2600 switch (clock->vclock_mode) {
2601 case VDSO_CLOCKMODE_HVCLOCK:
2602 tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(),
2603 tsc_timestamp);
2604 if (tsc_pg_val != U64_MAX) {
2605
2606 *mode = VDSO_CLOCKMODE_HVCLOCK;
2607 v = (tsc_pg_val - clock->cycle_last) &
2608 clock->mask;
2609 } else {
2610
2611 *mode = VDSO_CLOCKMODE_NONE;
2612 }
2613 break;
2614 case VDSO_CLOCKMODE_TSC:
2615 *mode = VDSO_CLOCKMODE_TSC;
2616 *tsc_timestamp = read_tsc();
2617 v = (*tsc_timestamp - clock->cycle_last) &
2618 clock->mask;
2619 break;
2620 default:
2621 *mode = VDSO_CLOCKMODE_NONE;
2622 }
2623
2624 if (*mode == VDSO_CLOCKMODE_NONE)
2625 *tsc_timestamp = v = 0;
2626
2627 return v * clock->mult;
2628}
2629
2630static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp)
2631{
2632 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2633 unsigned long seq;
2634 int mode;
2635 u64 ns;
2636
2637 do {
2638 seq = read_seqcount_begin(&gtod->seq);
2639 ns = gtod->raw_clock.base_cycles;
2640 ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode);
2641 ns >>= gtod->raw_clock.shift;
2642 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot));
2643 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2644 *t = ns;
2645
2646 return mode;
2647}
2648
2649static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
2650{
2651 struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2652 unsigned long seq;
2653 int mode;
2654 u64 ns;
2655
2656 do {
2657 seq = read_seqcount_begin(&gtod->seq);
2658 ts->tv_sec = gtod->wall_time_sec;
2659 ns = gtod->clock.base_cycles;
2660 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode);
2661 ns >>= gtod->clock.shift;
2662 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2663
2664 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
2665 ts->tv_nsec = ns;
2666
2667 return mode;
2668}
2669
2670
2671static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
2672{
2673
2674 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
2675 return false;
2676
2677 return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns,
2678 tsc_timestamp));
2679}
2680
2681
2682static bool kvm_get_walltime_and_clockread(struct timespec64 *ts,
2683 u64 *tsc_timestamp)
2684{
2685
2686 if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
2687 return false;
2688
2689 return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp));
2690}
2691#endif
2692
2693
2733
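/*
 * Added note, summarizing the rationale of the master clock: with
 * per-vCPU pvclock areas, two vCPUs whose areas are updated at slightly
 * different host times can observe kvmclock going backwards relative to
 * each other. To keep kvmclock monotonic across vCPUs, capture a single
 * (master_kernel_ns, master_cycle_now) pair and use that one copy for
 * every vCPU's pvclock, relying on host/guest TSC synchronization for
 * monotonicity. This requires a TSC-based host clocksource and matched
 * TSC writes on all vCPUs.
 */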
2734static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
2735{
2736#ifdef CONFIG_X86_64
2737 struct kvm_arch *ka = &kvm->arch;
2738 int vclock_mode;
2739 bool host_tsc_clocksource, vcpus_matched;
2740
2741 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
2742 atomic_read(&kvm->online_vcpus));
2743
2744
2745
2746
2747
2748 host_tsc_clocksource = kvm_get_time_and_clockread(
2749 &ka->master_kernel_ns,
2750 &ka->master_cycle_now);
2751
2752 ka->use_master_clock = host_tsc_clocksource && vcpus_matched
2753 && !ka->backwards_tsc_observed
2754 && !ka->boot_vcpu_runs_old_kvmclock;
2755
2756 if (ka->use_master_clock)
2757 atomic_set(&kvm_guest_has_master_clock, 1);
2758
2759 vclock_mode = pvclock_gtod_data.clock.vclock_mode;
2760 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
2761 vcpus_matched);
2762#endif
2763}
2764
2765void kvm_make_mclock_inprogress_request(struct kvm *kvm)
2766{
2767 kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
2768}
2769
2770static void kvm_gen_update_masterclock(struct kvm *kvm)
2771{
2772#ifdef CONFIG_X86_64
2773 int i;
2774 struct kvm_vcpu *vcpu;
2775 struct kvm_arch *ka = &kvm->arch;
2776 unsigned long flags;
2777
2778 kvm_hv_invalidate_tsc_page(kvm);
2779
2780 kvm_make_mclock_inprogress_request(kvm);
2781
2782
2783 raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
2784 pvclock_update_vm_gtod_copy(kvm);
2785 raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
2786
2787 kvm_for_each_vcpu(i, vcpu, kvm)
2788 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2789
2790
2791 kvm_for_each_vcpu(i, vcpu, kvm)
2792 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
2793#endif
2794}
2795
2796u64 get_kvmclock_ns(struct kvm *kvm)
2797{
2798 struct kvm_arch *ka = &kvm->arch;
2799 struct pvclock_vcpu_time_info hv_clock;
2800 unsigned long flags;
2801 u64 ret;
2802
2803 raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
2804 if (!ka->use_master_clock) {
2805 raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
2806 return get_kvmclock_base_ns() + ka->kvmclock_offset;
2807 }
2808
2809 hv_clock.tsc_timestamp = ka->master_cycle_now;
2810 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
2811 raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
2812
2813
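/* Added note: the cpu_tsc_khz read and rdtsc() below must run on the same CPU. */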
2814 get_cpu();
2815
2816 if (__this_cpu_read(cpu_tsc_khz)) {
2817 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
2818 &hv_clock.tsc_shift,
2819 &hv_clock.tsc_to_system_mul);
2820 ret = __pvclock_read_cycles(&hv_clock, rdtsc());
2821 } else
2822 ret = get_kvmclock_base_ns() + ka->kvmclock_offset;
2823
2824 put_cpu();
2825
2826 return ret;
2827}
2828
2829static void kvm_setup_pvclock_page(struct kvm_vcpu *v,
2830 struct gfn_to_hva_cache *cache,
2831 unsigned int offset)
2832{
2833 struct kvm_vcpu_arch *vcpu = &v->arch;
2834 struct pvclock_vcpu_time_info guest_hv_clock;
2835
2836 if (unlikely(kvm_read_guest_offset_cached(v->kvm, cache,
2837 &guest_hv_clock, offset, sizeof(guest_hv_clock))))
2838 return;
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
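/*
 * Added note: follow the pvclock update protocol. The version field is
 * odd while the structure is being modified and even once it is
 * consistent; the version writes are kept separate from the payload
 * write because cached guest writes may use string moves, which are
 * weakly ordered. Hence: bump version (odd), write payload, bump
 * version (even), with write barriers in between.
 */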
2854 BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
2855
2856 if (guest_hv_clock.version & 1)
2857 ++guest_hv_clock.version;
2858
2859 vcpu->hv_clock.version = guest_hv_clock.version + 1;
2860 kvm_write_guest_offset_cached(v->kvm, cache,
2861 &vcpu->hv_clock, offset,
2862 sizeof(vcpu->hv_clock.version));
2863
2864 smp_wmb();
2865
2866
2867 vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
2868
2869 if (vcpu->pvclock_set_guest_stopped_request) {
2870 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
2871 vcpu->pvclock_set_guest_stopped_request = false;
2872 }
2873
2874 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
2875
2876 kvm_write_guest_offset_cached(v->kvm, cache,
2877 &vcpu->hv_clock, offset,
2878 sizeof(vcpu->hv_clock));
2879
2880 smp_wmb();
2881
2882 vcpu->hv_clock.version++;
2883 kvm_write_guest_offset_cached(v->kvm, cache,
2884 &vcpu->hv_clock, offset,
2885 sizeof(vcpu->hv_clock.version));
2886}
2887
2888static int kvm_guest_time_update(struct kvm_vcpu *v)
2889{
2890 unsigned long flags, tgt_tsc_khz;
2891 struct kvm_vcpu_arch *vcpu = &v->arch;
2892 struct kvm_arch *ka = &v->kvm->arch;
2893 s64 kernel_ns;
2894 u64 tsc_timestamp, host_tsc;
2895 u8 pvclock_flags;
2896 bool use_master_clock;
2897
2898 kernel_ns = 0;
2899 host_tsc = 0;
2900
2901
2902
2903
2904
2905 raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
2906 use_master_clock = ka->use_master_clock;
2907 if (use_master_clock) {
2908 host_tsc = ka->master_cycle_now;
2909 kernel_ns = ka->master_kernel_ns;
2910 }
2911 raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
2912
2913
2914 local_irq_save(flags);
2915 tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz);
2916 if (unlikely(tgt_tsc_khz == 0)) {
2917 local_irq_restore(flags);
2918 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
2919 return 1;
2920 }
2921 if (!use_master_clock) {
2922 host_tsc = rdtsc();
2923 kernel_ns = get_kvmclock_base_ns();
2924 }
2925
2926 tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
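/*
 * Added note: in catchup mode, never let the guest-visible TSC fall
 * behind the value implied by elapsed kvmclock time; if it would, bump
 * the TSC offset forward so the guest TSC keeps pace.
 */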
2938 if (vcpu->tsc_catchup) {
2939 u64 tsc = compute_guest_tsc(v, kernel_ns);
2940 if (tsc > tsc_timestamp) {
2941 adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
2942 tsc_timestamp = tsc;
2943 }
2944 }
2945
2946 local_irq_restore(flags);
2947
2948
2949
2950 if (kvm_has_tsc_control)
2951 tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz,
2952 v->arch.l1_tsc_scaling_ratio);
2953
2954 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
2955 kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
2956 &vcpu->hv_clock.tsc_shift,
2957 &vcpu->hv_clock.tsc_to_system_mul);
2958 vcpu->hw_tsc_khz = tgt_tsc_khz;
2959 }
2960
2961 vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
2962 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
2963 vcpu->last_guest_tsc = tsc_timestamp;
2964
2965
2966 pvclock_flags = 0;
2967 if (use_master_clock)
2968 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
2969
2970 vcpu->hv_clock.flags = pvclock_flags;
2971
2972 if (vcpu->pv_time_enabled)
2973 kvm_setup_pvclock_page(v, &vcpu->pv_time, 0);
2974 if (vcpu->xen.vcpu_info_set)
2975 kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_info_cache,
2976 offsetof(struct compat_vcpu_info, time));
2977 if (vcpu->xen.vcpu_time_info_set)
2978 kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
2979 if (!v->vcpu_idx)
2980 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
2981 return 0;
2982}
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
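/*
 * Added note: kvmclock updates that are isolated to a single vCPU (e.g.
 * vcpu->cpu migration) would leave the other vCPUs' system_timestamp
 * static, so a clock update is requested for all vCPUs, rate-limited by
 * the delay below to avoid penalizing guests with many vCPUs.
 */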
2998#define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
2999
3000static void kvmclock_update_fn(struct work_struct *work)
3001{
3002 int i;
3003 struct delayed_work *dwork = to_delayed_work(work);
3004 struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
3005 kvmclock_update_work);
3006 struct kvm *kvm = container_of(ka, struct kvm, arch);
3007 struct kvm_vcpu *vcpu;
3008
3009 kvm_for_each_vcpu(i, vcpu, kvm) {
3010 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3011 kvm_vcpu_kick(vcpu);
3012 }
3013}
3014
3015static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
3016{
3017 struct kvm *kvm = v->kvm;
3018
3019 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
3020 schedule_delayed_work(&kvm->arch.kvmclock_update_work,
3021 KVMCLOCK_UPDATE_DELAY);
3022}
3023
3024#define KVMCLOCK_SYNC_PERIOD (300 * HZ)
3025
3026static void kvmclock_sync_fn(struct work_struct *work)
3027{
3028 struct delayed_work *dwork = to_delayed_work(work);
3029 struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
3030 kvmclock_sync_work);
3031 struct kvm *kvm = container_of(ka, struct kvm, arch);
3032
3033 if (!kvmclock_periodic_sync)
3034 return;
3035
3036 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
3037 schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
3038 KVMCLOCK_SYNC_PERIOD);
3039}
3040
3041
3042
3043
3044static bool can_set_mci_status(struct kvm_vcpu *vcpu)
3045{
3046
3047 if (guest_cpuid_is_amd_or_hygon(vcpu))
3048 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
3049
3050 return false;
3051}
3052
3053static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3054{
3055 u64 mcg_cap = vcpu->arch.mcg_cap;
3056 unsigned bank_num = mcg_cap & 0xff;
3057 u32 msr = msr_info->index;
3058 u64 data = msr_info->data;
3059
3060 switch (msr) {
3061 case MSR_IA32_MCG_STATUS:
3062 vcpu->arch.mcg_status = data;
3063 break;
3064 case MSR_IA32_MCG_CTL:
3065 if (!(mcg_cap & MCG_CTL_P) &&
3066 (data || !msr_info->host_initiated))
3067 return 1;
3068 if (data != 0 && data != ~(u64)0)
3069 return 1;
3070 vcpu->arch.mcg_ctl = data;
3071 break;
3072 default:
3073 if (msr >= MSR_IA32_MC0_CTL &&
3074 msr < MSR_IA32_MCx_CTL(bank_num)) {
3075 u32 offset = array_index_nospec(
3076 msr - MSR_IA32_MC0_CTL,
3077 MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL);
3078
3079
3080
3081
3082
3083
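/*
 * Added note: only all-zeros or all-ones may be written to IA32_MCi_CTL,
 * except that bit 10 is tolerated because some kernels clear it in bank 4
 * to work around an AMD K8 erratum; that must not fault in the guest.
 */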
3084 if ((offset & 0x3) == 0 &&
3085 data != 0 && (data | (1 << 10)) != ~(u64)0)
3086 return -1;
3087
3088
3089 if (!msr_info->host_initiated &&
3090 (offset & 0x3) == 1 && data != 0) {
3091 if (!can_set_mci_status(vcpu))
3092 return -1;
3093 }
3094
3095 vcpu->arch.mce_banks[offset] = data;
3096 break;
3097 }
3098 return 1;
3099 }
3100 return 0;
3101}
3102
3103static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
3104{
3105 u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
3106
3107 return (vcpu->arch.apf.msr_en_val & mask) == mask;
3108}
3109
3110static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
3111{
3112 gpa_t gpa = data & ~0x3f;
3113
3114
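/* Added note: bits 4 and 5 of MSR_KVM_ASYNC_PF_EN are reserved and must be zero. */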
3115 if (data & 0x30)
3116 return 1;
3117
3118 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) &&
3119 (data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT))
3120 return 1;
3121
3122 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) &&
3123 (data & KVM_ASYNC_PF_DELIVERY_AS_INT))
3124 return 1;
3125
3126 if (!lapic_in_kernel(vcpu))
3127 return data ? 1 : 0;
3128
3129 vcpu->arch.apf.msr_en_val = data;
3130
3131 if (!kvm_pv_async_pf_enabled(vcpu)) {
3132 kvm_clear_async_pf_completion_queue(vcpu);
3133 kvm_async_pf_hash_reset(vcpu);
3134 return 0;
3135 }
3136
3137 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
3138 sizeof(u64)))
3139 return 1;
3140
3141 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
3142 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
3143
3144 kvm_async_pf_wakeup_all(vcpu);
3145
3146 return 0;
3147}
3148
3149static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
3150{
3151
3152 if (data >> 8)
3153 return 1;
3154
3155 if (!lapic_in_kernel(vcpu))
3156 return 1;
3157
3158 vcpu->arch.apf.msr_int_val = data;
3159
3160 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK;
3161
3162 return 0;
3163}
3164
3165static void kvmclock_reset(struct kvm_vcpu *vcpu)
3166{
3167 vcpu->arch.pv_time_enabled = false;
3168 vcpu->arch.time = 0;
3169}
3170
3171static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
3172{
3173 ++vcpu->stat.tlb_flush;
3174 static_call(kvm_x86_tlb_flush_all)(vcpu);
3175}
3176
3177static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
3178{
3179 ++vcpu->stat.tlb_flush;
3180
3181 if (!tdp_enabled) {
3182
3183
3184
3185
3186
3187
3188
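/*
 * Added note: without TDP, a guest-visible TLB flush also requires
 * resyncing the shadow page tables, so drop the roots; they are reloaded
 * (and the hardware TLB flushed) before the next entry.
 */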
3189 kvm_mmu_unload(vcpu);
3190 return;
3191 }
3192
3193 static_call(kvm_x86_tlb_flush_guest)(vcpu);
3194}
3195
3196static void record_steal_time(struct kvm_vcpu *vcpu)
3197{
3198 struct kvm_host_map map;
3199 struct kvm_steal_time *st;
3200
3201 if (kvm_xen_msr_enabled(vcpu->kvm)) {
3202 kvm_xen_runstate_set_running(vcpu);
3203 return;
3204 }
3205
3206 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
3207 return;
3208
3209
3210 if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
3211 &map, &vcpu->arch.st.cache, false))
3212 return;
3213
3214 st = map.hva +
3215 offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
3216
3217
3218
3219
3220
3221 if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
3222 u8 st_preempted = xchg(&st->preempted, 0);
3223
3224 trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
3225 st_preempted & KVM_VCPU_FLUSH_TLB);
3226 if (st_preempted & KVM_VCPU_FLUSH_TLB)
3227 kvm_vcpu_flush_tlb_guest(vcpu);
3228 } else {
3229 st->preempted = 0;
3230 }
3231
3232 vcpu->arch.st.preempted = 0;
3233
3234 if (st->version & 1)
3235 st->version += 1;
3236
3237 st->version += 1;
3238
3239 smp_wmb();
3240
3241 st->steal += current->sched_info.run_delay -
3242 vcpu->arch.st.last_steal;
3243 vcpu->arch.st.last_steal = current->sched_info.run_delay;
3244
3245 smp_wmb();
3246
3247 st->version += 1;
3248
3249 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
3250}
3251
3252int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3253{
3254 bool pr = false;
3255 u32 msr = msr_info->index;
3256 u64 data = msr_info->data;
3257
3258 if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr)
3259 return kvm_xen_write_hypercall_page(vcpu, data);
3260
3261 switch (msr) {
3262 case MSR_AMD64_NB_CFG:
3263 case MSR_IA32_UCODE_WRITE:
3264 case MSR_VM_HSAVE_PA:
3265 case MSR_AMD64_PATCH_LOADER:
3266 case MSR_AMD64_BU_CFG2:
3267 case MSR_AMD64_DC_CFG:
3268 case MSR_F15H_EX_CFG:
3269 break;
3270
3271 case MSR_IA32_UCODE_REV:
3272 if (msr_info->host_initiated)
3273 vcpu->arch.microcode_version = data;
3274 break;
3275 case MSR_IA32_ARCH_CAPABILITIES:
3276 if (!msr_info->host_initiated)
3277 return 1;
3278 vcpu->arch.arch_capabilities = data;
3279 break;
3280 case MSR_IA32_PERF_CAPABILITIES: {
3281 struct kvm_msr_entry msr_ent = {.index = msr, .data = 0};
3282
3283 if (!msr_info->host_initiated)
3284 return 1;
3285 if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM) && kvm_get_msr_feature(&msr_ent))
3286 return 1;
3287 if (data & ~msr_ent.data)
3288 return 1;
3289
3290 vcpu->arch.perf_capabilities = data;
3291
3292 return 0;
3293 }
3294 case MSR_EFER:
3295 return set_efer(vcpu, msr_info);
3296 case MSR_K7_HWCR:
3297 data &= ~(u64)0x40;
3298 data &= ~(u64)0x100;
3299 data &= ~(u64)0x8;
3300
3301
3302 if (data == BIT_ULL(18)) {
3303 vcpu->arch.msr_hwcr = data;
3304 } else if (data != 0) {
3305 vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
3306 data);
3307 return 1;
3308 }
3309 break;
3310 case MSR_FAM10H_MMIO_CONF_BASE:
3311 if (data != 0) {
3312 vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
3313 "0x%llx\n", data);
3314 return 1;
3315 }
3316 break;
3317 case 0x200 ... 0x2ff:
3318 return kvm_mtrr_set_msr(vcpu, msr, data);
3319 case MSR_IA32_APICBASE:
3320 return kvm_set_apic_base(vcpu, msr_info);
3321 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
3322 return kvm_x2apic_msr_write(vcpu, msr, data);
3323 case MSR_IA32_TSC_DEADLINE:
3324 kvm_set_lapic_tscdeadline_msr(vcpu, data);
3325 break;
3326 case MSR_IA32_TSC_ADJUST:
3327 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
3328 if (!msr_info->host_initiated) {
3329 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
3330 adjust_tsc_offset_guest(vcpu, adj);
3331
3332
3333
3334 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3335 }
3336 vcpu->arch.ia32_tsc_adjust_msr = data;
3337 }
3338 break;
3339 case MSR_IA32_MISC_ENABLE:
3340 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) &&
3341 ((vcpu->arch.ia32_misc_enable_msr ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) {
3342 if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3))
3343 return 1;
3344 vcpu->arch.ia32_misc_enable_msr = data;
3345 kvm_update_cpuid_runtime(vcpu);
3346 } else {
3347 vcpu->arch.ia32_misc_enable_msr = data;
3348 }
3349 break;
3350 case MSR_IA32_SMBASE:
3351 if (!msr_info->host_initiated)
3352 return 1;
3353 vcpu->arch.smbase = data;
3354 break;
3355 case MSR_IA32_POWER_CTL:
3356 vcpu->arch.msr_ia32_power_ctl = data;
3357 break;
3358 case MSR_IA32_TSC:
3359 if (msr_info->host_initiated) {
3360 kvm_synchronize_tsc(vcpu, data);
3361 } else {
3362 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
3363 adjust_tsc_offset_guest(vcpu, adj);
3364 vcpu->arch.ia32_tsc_adjust_msr += adj;
3365 }
3366 break;
3367 case MSR_IA32_XSS:
3368 if (!msr_info->host_initiated &&
3369 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
3370 return 1;
3371
3372
3373
3374
3375
3376 if (data & ~supported_xss)
3377 return 1;
3378 vcpu->arch.ia32_xss = data;
3379 break;
3380 case MSR_SMI_COUNT:
3381 if (!msr_info->host_initiated)
3382 return 1;
3383 vcpu->arch.smi_count = data;
3384 break;
3385 case MSR_KVM_WALL_CLOCK_NEW:
3386 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
3387 return 1;
3388
3389 vcpu->kvm->arch.wall_clock = data;
3390 kvm_write_wall_clock(vcpu->kvm, data, 0);
3391 break;
3392 case MSR_KVM_WALL_CLOCK:
3393 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
3394 return 1;
3395
3396 vcpu->kvm->arch.wall_clock = data;
3397 kvm_write_wall_clock(vcpu->kvm, data, 0);
3398 break;
3399 case MSR_KVM_SYSTEM_TIME_NEW:
3400 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
3401 return 1;
3402
3403 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated);
3404 break;
3405 case MSR_KVM_SYSTEM_TIME:
3406 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
3407 return 1;
3408
3409 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated);
3410 break;
3411 case MSR_KVM_ASYNC_PF_EN:
3412 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
3413 return 1;
3414
3415 if (kvm_pv_enable_async_pf(vcpu, data))
3416 return 1;
3417 break;
3418 case MSR_KVM_ASYNC_PF_INT:
3419 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
3420 return 1;
3421
3422 if (kvm_pv_enable_async_pf_int(vcpu, data))
3423 return 1;
3424 break;
3425 case MSR_KVM_ASYNC_PF_ACK:
3426 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
3427 return 1;
3428 if (data & 0x1) {
3429 vcpu->arch.apf.pageready_pending = false;
3430 kvm_check_async_pf_completion(vcpu);
3431 }
3432 break;
3433 case MSR_KVM_STEAL_TIME:
3434 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
3435 return 1;
3436
3437 if (unlikely(!sched_info_on()))
3438 return 1;
3439
3440 if (data & KVM_STEAL_RESERVED_MASK)
3441 return 1;
3442
3443 vcpu->arch.st.msr_val = data;
3444
3445 if (!(data & KVM_MSR_ENABLED))
3446 break;
3447
3448 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
3449
3450 break;
3451 case MSR_KVM_PV_EOI_EN:
3452 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
3453 return 1;
3454
3455 if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8)))
3456 return 1;
3457 break;
3458
3459 case MSR_KVM_POLL_CONTROL:
3460 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
3461 return 1;
3462
3463
3464 if (data & (-1ULL << 1))
3465 return 1;
3466
3467 vcpu->arch.msr_kvm_poll_control = data;
3468 break;
3469
3470 case MSR_IA32_MCG_CTL:
3471 case MSR_IA32_MCG_STATUS:
3472 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
3473 return set_msr_mce(vcpu, msr_info);
3474
3475 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
3476 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
3477 pr = true;
3478 fallthrough;
3479 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
3480 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
3481 if (kvm_pmu_is_valid_msr(vcpu, msr))
3482 return kvm_pmu_set_msr(vcpu, msr_info);
3483
3484 if (pr || data != 0)
3485 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
3486 "0x%x data 0x%llx\n", msr, data);
3487 break;
3488 case MSR_K7_CLK_CTL:
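/*
 * Added note: writes to this legacy, undocumented K7 MSR are silently
 * ignored; they matter only as an old AMD-recommended workaround and
 * have no effect on a virtual CPU.
 */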
3489
3490
3491
3492
3493
3494
3495
3496
3497 break;
3498 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
3499 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
3500 case HV_X64_MSR_SYNDBG_OPTIONS:
3501 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
3502 case HV_X64_MSR_CRASH_CTL:
3503 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
3504 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
3505 case HV_X64_MSR_TSC_EMULATION_CONTROL:
3506 case HV_X64_MSR_TSC_EMULATION_STATUS:
3507 return kvm_hv_set_msr_common(vcpu, msr, data,
3508 msr_info->host_initiated);
3509 case MSR_IA32_BBL_CR_CTL3:
3510
3511
3512
3513 if (report_ignored_msrs)
3514 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
3515 msr, data);
3516 break;
3517 case MSR_AMD64_OSVW_ID_LENGTH:
3518 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
3519 return 1;
3520 vcpu->arch.osvw.length = data;
3521 break;
3522 case MSR_AMD64_OSVW_STATUS:
3523 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
3524 return 1;
3525 vcpu->arch.osvw.status = data;
3526 break;
3527 case MSR_PLATFORM_INFO:
3528 if (!msr_info->host_initiated ||
3529 (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
3530 cpuid_fault_enabled(vcpu)))
3531 return 1;
3532 vcpu->arch.msr_platform_info = data;
3533 break;
3534 case MSR_MISC_FEATURES_ENABLES:
3535 if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
3536 (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3537 !supports_cpuid_fault(vcpu)))
3538 return 1;
3539 vcpu->arch.msr_misc_features_enables = data;
3540 break;
3541 default:
3542 if (kvm_pmu_is_valid_msr(vcpu, msr))
3543 return kvm_pmu_set_msr(vcpu, msr_info);
3544 return KVM_MSR_RET_INVALID;
3545 }
3546 return 0;
3547}
3548EXPORT_SYMBOL_GPL(kvm_set_msr_common);
3549
3550static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
3551{
3552 u64 data;
3553 u64 mcg_cap = vcpu->arch.mcg_cap;
3554 unsigned bank_num = mcg_cap & 0xff;
3555
3556 switch (msr) {
3557 case MSR_IA32_P5_MC_ADDR:
3558 case MSR_IA32_P5_MC_TYPE:
3559 data = 0;
3560 break;
3561 case MSR_IA32_MCG_CAP:
3562 data = vcpu->arch.mcg_cap;
3563 break;
3564 case MSR_IA32_MCG_CTL:
3565 if (!(mcg_cap & MCG_CTL_P) && !host)
3566 return 1;
3567 data = vcpu->arch.mcg_ctl;
3568 break;
3569 case MSR_IA32_MCG_STATUS:
3570 data = vcpu->arch.mcg_status;
3571 break;
3572 default:
3573 if (msr >= MSR_IA32_MC0_CTL &&
3574 msr < MSR_IA32_MCx_CTL(bank_num)) {
3575 u32 offset = array_index_nospec(
3576 msr - MSR_IA32_MC0_CTL,
3577 MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL);
3578
3579 data = vcpu->arch.mce_banks[offset];
3580 break;
3581 }
3582 return 1;
3583 }
3584 *pdata = data;
3585 return 0;
3586}
3587
3588int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3589{
3590 switch (msr_info->index) {
3591 case MSR_IA32_PLATFORM_ID:
3592 case MSR_IA32_EBL_CR_POWERON:
3593 case MSR_IA32_LASTBRANCHFROMIP:
3594 case MSR_IA32_LASTBRANCHTOIP:
3595 case MSR_IA32_LASTINTFROMIP:
3596 case MSR_IA32_LASTINTTOIP:
3597 case MSR_AMD64_SYSCFG:
3598 case MSR_K8_TSEG_ADDR:
3599 case MSR_K8_TSEG_MASK:
3600 case MSR_VM_HSAVE_PA:
3601 case MSR_K8_INT_PENDING_MSG:
3602 case MSR_AMD64_NB_CFG:
3603 case MSR_FAM10H_MMIO_CONF_BASE:
3604 case MSR_AMD64_BU_CFG2:
3605 case MSR_IA32_PERF_CTL:
3606 case MSR_AMD64_DC_CFG:
3607 case MSR_F15H_EX_CFG:
3608
3609
3610
3611
3612
3613
3614 case MSR_RAPL_POWER_UNIT:
3615 case MSR_PP0_ENERGY_STATUS:
3616 case MSR_PP1_ENERGY_STATUS:
3617 case MSR_PKG_ENERGY_STATUS:
3618 case MSR_DRAM_ENERGY_STATUS:
3619 msr_info->data = 0;
3620 break;
3621 case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
3622 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
3623 return kvm_pmu_get_msr(vcpu, msr_info);
3624 if (!msr_info->host_initiated)
3625 return 1;
3626 msr_info->data = 0;
3627 break;
3628 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
3629 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
3630 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
3631 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
3632 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
3633 return kvm_pmu_get_msr(vcpu, msr_info);
3634 msr_info->data = 0;
3635 break;
3636 case MSR_IA32_UCODE_REV:
3637 msr_info->data = vcpu->arch.microcode_version;
3638 break;
3639 case MSR_IA32_ARCH_CAPABILITIES:
3640 if (!msr_info->host_initiated &&
3641 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
3642 return 1;
3643 msr_info->data = vcpu->arch.arch_capabilities;
3644 break;
3645 case MSR_IA32_PERF_CAPABILITIES:
3646 if (!msr_info->host_initiated &&
3647 !guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
3648 return 1;
3649 msr_info->data = vcpu->arch.perf_capabilities;
3650 break;
3651 case MSR_IA32_POWER_CTL:
3652 msr_info->data = vcpu->arch.msr_ia32_power_ctl;
3653 break;
3654 case MSR_IA32_TSC: {
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664 u64 offset, ratio;
3665
3666 if (msr_info->host_initiated) {
3667 offset = vcpu->arch.l1_tsc_offset;
3668 ratio = vcpu->arch.l1_tsc_scaling_ratio;
3669 } else {
3670 offset = vcpu->arch.tsc_offset;
3671 ratio = vcpu->arch.tsc_scaling_ratio;
3672 }
3673
3674 msr_info->data = kvm_scale_tsc(vcpu, rdtsc(), ratio) + offset;
3675 break;
3676 }
3677 case MSR_MTRRcap:
3678 case 0x200 ... 0x2ff:
3679 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
3680 case 0xcd:
3681 msr_info->data = 3;
3682 break;
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694 case MSR_EBC_FREQUENCY_ID:
3695 msr_info->data = 1 << 24;
3696 break;
3697 case MSR_IA32_APICBASE:
3698 msr_info->data = kvm_get_apic_base(vcpu);
3699 break;
3700 case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
3701 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
3702 case MSR_IA32_TSC_DEADLINE:
3703 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
3704 break;
3705 case MSR_IA32_TSC_ADJUST:
3706 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
3707 break;
3708 case MSR_IA32_MISC_ENABLE:
3709 msr_info->data = vcpu->arch.ia32_misc_enable_msr;
3710 break;
3711 case MSR_IA32_SMBASE:
3712 if (!msr_info->host_initiated)
3713 return 1;
3714 msr_info->data = vcpu->arch.smbase;
3715 break;
3716 case MSR_SMI_COUNT:
3717 msr_info->data = vcpu->arch.smi_count;
3718 break;
3719 case MSR_IA32_PERF_STATUS:
3720
3721 msr_info->data = 1000ULL;
3722
3723 msr_info->data |= (((uint64_t)4ULL) << 40);
3724 break;
3725 case MSR_EFER:
3726 msr_info->data = vcpu->arch.efer;
3727 break;
3728 case MSR_KVM_WALL_CLOCK:
3729 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
3730 return 1;
3731
3732 msr_info->data = vcpu->kvm->arch.wall_clock;
3733 break;
3734 case MSR_KVM_WALL_CLOCK_NEW:
3735 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
3736 return 1;
3737
3738 msr_info->data = vcpu->kvm->arch.wall_clock;
3739 break;
3740 case MSR_KVM_SYSTEM_TIME:
3741 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
3742 return 1;
3743
3744 msr_info->data = vcpu->arch.time;
3745 break;
3746 case MSR_KVM_SYSTEM_TIME_NEW:
3747 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
3748 return 1;
3749
3750 msr_info->data = vcpu->arch.time;
3751 break;
3752 case MSR_KVM_ASYNC_PF_EN:
3753 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
3754 return 1;
3755
3756 msr_info->data = vcpu->arch.apf.msr_en_val;
3757 break;
3758 case MSR_KVM_ASYNC_PF_INT:
3759 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
3760 return 1;
3761
3762 msr_info->data = vcpu->arch.apf.msr_int_val;
3763 break;
3764 case MSR_KVM_ASYNC_PF_ACK:
3765 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
3766 return 1;
3767
3768 msr_info->data = 0;
3769 break;
3770 case MSR_KVM_STEAL_TIME:
3771 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
3772 return 1;
3773
3774 msr_info->data = vcpu->arch.st.msr_val;
3775 break;
3776 case MSR_KVM_PV_EOI_EN:
3777 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
3778 return 1;
3779
3780 msr_info->data = vcpu->arch.pv_eoi.msr_val;
3781 break;
3782 case MSR_KVM_POLL_CONTROL:
3783 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
3784 return 1;
3785
3786 msr_info->data = vcpu->arch.msr_kvm_poll_control;
3787 break;
3788 case MSR_IA32_P5_MC_ADDR:
3789 case MSR_IA32_P5_MC_TYPE:
3790 case MSR_IA32_MCG_CAP:
3791 case MSR_IA32_MCG_CTL:
3792 case MSR_IA32_MCG_STATUS:
3793 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
3794 return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
3795 msr_info->host_initiated);
3796 case MSR_IA32_XSS:
3797 if (!msr_info->host_initiated &&
3798 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
3799 return 1;
3800 msr_info->data = vcpu->arch.ia32_xss;
3801 break;
3802 case MSR_K7_CLK_CTL:
3803
3804
3805
3806
3807
3808
3809
3810
3811
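/*
 * Added note: return the expected ramp-up count for K7 (all other fields
 * zero) so guests that read this MSR on AMD-like CPUs do not fail on a
 * rejected rdmsr.
 */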
3812 msr_info->data = 0x20000000;
3813 break;
3814 case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
3815 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
3816 case HV_X64_MSR_SYNDBG_OPTIONS:
3817 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
3818 case HV_X64_MSR_CRASH_CTL:
3819 case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
3820 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
3821 case HV_X64_MSR_TSC_EMULATION_CONTROL:
3822 case HV_X64_MSR_TSC_EMULATION_STATUS:
3823 return kvm_hv_get_msr_common(vcpu,
3824 msr_info->index, &msr_info->data,
3825 msr_info->host_initiated);
3826 case MSR_IA32_BBL_CR_CTL3:
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837 msr_info->data = 0xbe702111;
3838 break;
3839 case MSR_AMD64_OSVW_ID_LENGTH:
3840 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
3841 return 1;
3842 msr_info->data = vcpu->arch.osvw.length;
3843 break;
3844 case MSR_AMD64_OSVW_STATUS:
3845 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
3846 return 1;
3847 msr_info->data = vcpu->arch.osvw.status;
3848 break;
3849 case MSR_PLATFORM_INFO:
3850 if (!msr_info->host_initiated &&
3851 !vcpu->kvm->arch.guest_can_read_msr_platform_info)
3852 return 1;
3853 msr_info->data = vcpu->arch.msr_platform_info;
3854 break;
3855 case MSR_MISC_FEATURES_ENABLES:
3856 msr_info->data = vcpu->arch.msr_misc_features_enables;
3857 break;
3858 case MSR_K7_HWCR:
3859 msr_info->data = vcpu->arch.msr_hwcr;
3860 break;
3861 default:
3862 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
3863 return kvm_pmu_get_msr(vcpu, msr_info);
3864 return KVM_MSR_RET_INVALID;
3865 }
3866 return 0;
3867}
3868EXPORT_SYMBOL_GPL(kvm_get_msr_common);
3869
3870
3871
3872
3873
3874
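/*
 * Added note: read or write a bunch of msrs; all parameters are kernel
 * addresses. Returns the number of msrs processed successfully.
 */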
3875static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
3876 struct kvm_msr_entry *entries,
3877 int (*do_msr)(struct kvm_vcpu *vcpu,
3878 unsigned index, u64 *data))
3879{
3880 int i;
3881
3882 for (i = 0; i < msrs->nmsrs; ++i)
3883 if (do_msr(vcpu, entries[i].index, &entries[i].data))
3884 break;
3885
3886 return i;
3887}
3888
3889
3890
3891
3892
3893
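/*
 * Added note: read or write a bunch of msrs; parameters are user
 * addresses. Returns the number of msrs processed successfully.
 */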
3894static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
3895 int (*do_msr)(struct kvm_vcpu *vcpu,
3896 unsigned index, u64 *data),
3897 int writeback)
3898{
3899 struct kvm_msrs msrs;
3900 struct kvm_msr_entry *entries;
3901 int r, n;
3902 unsigned size;
3903
3904 r = -EFAULT;
3905 if (copy_from_user(&msrs, user_msrs, sizeof(msrs)))
3906 goto out;
3907
3908 r = -E2BIG;
3909 if (msrs.nmsrs >= MAX_IO_MSRS)
3910 goto out;
3911
3912 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
3913 entries = memdup_user(user_msrs->entries, size);
3914 if (IS_ERR(entries)) {
3915 r = PTR_ERR(entries);
3916 goto out;
3917 }
3918
3919 r = n = __msr_io(vcpu, &msrs, entries, do_msr);
3920 if (r < 0)
3921 goto out_free;
3922
3923 r = -EFAULT;
3924 if (writeback && copy_to_user(user_msrs->entries, entries, size))
3925 goto out_free;
3926
3927 r = n;
3928
3929out_free:
3930 kfree(entries);
3931out:
3932 return r;
3933}
3934
3935static inline bool kvm_can_mwait_in_guest(void)
3936{
3937 return boot_cpu_has(X86_FEATURE_MWAIT) &&
3938 !boot_cpu_has_bug(X86_BUG_MONITOR) &&
3939 boot_cpu_has(X86_FEATURE_ARAT);
3940}
3941
3942static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
3943 struct kvm_cpuid2 __user *cpuid_arg)
3944{
3945 struct kvm_cpuid2 cpuid;
3946 int r;
3947
3948 r = -EFAULT;
3949 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
3950 return r;
3951
3952 r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries);
3953 if (r)
3954 return r;
3955
3956 r = -EFAULT;
3957 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
3958 return r;
3959
3960 return 0;
3961}
3962
3963int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
3964{
3965 int r = 0;
3966
3967 switch (ext) {
3968 case KVM_CAP_IRQCHIP:
3969 case KVM_CAP_HLT:
3970 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
3971 case KVM_CAP_SET_TSS_ADDR:
3972 case KVM_CAP_EXT_CPUID:
3973 case KVM_CAP_EXT_EMUL_CPUID:
3974 case KVM_CAP_CLOCKSOURCE:
3975 case KVM_CAP_PIT:
3976 case KVM_CAP_NOP_IO_DELAY:
3977 case KVM_CAP_MP_STATE:
3978 case KVM_CAP_SYNC_MMU:
3979 case KVM_CAP_USER_NMI:
3980 case KVM_CAP_REINJECT_CONTROL:
3981 case KVM_CAP_IRQ_INJECT_STATUS:
3982 case KVM_CAP_IOEVENTFD:
3983 case KVM_CAP_IOEVENTFD_NO_LENGTH:
3984 case KVM_CAP_PIT2:
3985 case KVM_CAP_PIT_STATE2:
3986 case KVM_CAP_SET_IDENTITY_MAP_ADDR:
3987 case KVM_CAP_VCPU_EVENTS:
3988 case KVM_CAP_HYPERV:
3989 case KVM_CAP_HYPERV_VAPIC:
3990 case KVM_CAP_HYPERV_SPIN:
3991 case KVM_CAP_HYPERV_SYNIC:
3992 case KVM_CAP_HYPERV_SYNIC2:
3993 case KVM_CAP_HYPERV_VP_INDEX:
3994 case KVM_CAP_HYPERV_EVENTFD:
3995 case KVM_CAP_HYPERV_TLBFLUSH:
3996 case KVM_CAP_HYPERV_SEND_IPI:
3997 case KVM_CAP_HYPERV_CPUID:
3998 case KVM_CAP_HYPERV_ENFORCE_CPUID:
3999 case KVM_CAP_SYS_HYPERV_CPUID:
4000 case KVM_CAP_PCI_SEGMENT:
4001 case KVM_CAP_DEBUGREGS:
4002 case KVM_CAP_X86_ROBUST_SINGLESTEP:
4003 case KVM_CAP_XSAVE:
4004 case KVM_CAP_ASYNC_PF:
4005 case KVM_CAP_ASYNC_PF_INT:
4006 case KVM_CAP_GET_TSC_KHZ:
4007 case KVM_CAP_KVMCLOCK_CTRL:
4008 case KVM_CAP_READONLY_MEM:
4009 case KVM_CAP_HYPERV_TIME:
4010 case KVM_CAP_IOAPIC_POLARITY_IGNORED:
4011 case KVM_CAP_TSC_DEADLINE_TIMER:
4012 case KVM_CAP_DISABLE_QUIRKS:
4013 case KVM_CAP_SET_BOOT_CPU_ID:
4014 case KVM_CAP_SPLIT_IRQCHIP:
4015 case KVM_CAP_IMMEDIATE_EXIT:
4016 case KVM_CAP_PMU_EVENT_FILTER:
4017 case KVM_CAP_GET_MSR_FEATURES:
4018 case KVM_CAP_MSR_PLATFORM_INFO:
4019 case KVM_CAP_EXCEPTION_PAYLOAD:
4020 case KVM_CAP_SET_GUEST_DEBUG:
4021 case KVM_CAP_LAST_CPU:
4022 case KVM_CAP_X86_USER_SPACE_MSR:
4023 case KVM_CAP_X86_MSR_FILTER:
4024 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
4025#ifdef CONFIG_X86_SGX_KVM
4026 case KVM_CAP_SGX_ATTRIBUTE:
4027#endif
4028 case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
4029 case KVM_CAP_SREGS2:
4030 case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
4031 r = 1;
4032 break;
4033 case KVM_CAP_EXIT_HYPERCALL:
4034 r = KVM_EXIT_HYPERCALL_VALID_MASK;
4035 break;
4036 case KVM_CAP_SET_GUEST_DEBUG2:
4037 return KVM_GUESTDBG_VALID_MASK;
4038#ifdef CONFIG_KVM_XEN
4039 case KVM_CAP_XEN_HVM:
4040 r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR |
4041 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
4042 KVM_XEN_HVM_CONFIG_SHARED_INFO;
4043 if (sched_info_on())
4044 r |= KVM_XEN_HVM_CONFIG_RUNSTATE;
4045 break;
4046#endif
4047 case KVM_CAP_SYNC_REGS:
4048 r = KVM_SYNC_X86_VALID_FIELDS;
4049 break;
4050 case KVM_CAP_ADJUST_CLOCK:
4051 r = KVM_CLOCK_TSC_STABLE;
4052 break;
4053 case KVM_CAP_X86_DISABLE_EXITS:
4054 r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE |
4055 KVM_X86_DISABLE_EXITS_CSTATE;
4056 if (kvm_can_mwait_in_guest())
4057 r |= KVM_X86_DISABLE_EXITS_MWAIT;
4058 break;
4059 case KVM_CAP_X86_SMM:
4060
4061
4062
4063
4064
4065
4066
4067
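/*
 * Added note: report SMM support only when the vendor module can emulate
 * the SMBASE MSR, since SMM entry/exit cannot be emulated without it.
 */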
4068 r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE);
4069 break;
4070 case KVM_CAP_VAPIC:
4071 r = !static_call(kvm_x86_cpu_has_accelerated_tpr)();
4072 break;
4073 case KVM_CAP_NR_VCPUS:
4074 r = KVM_SOFT_MAX_VCPUS;
4075 break;
4076 case KVM_CAP_MAX_VCPUS:
4077 r = KVM_MAX_VCPUS;
4078 break;
4079 case KVM_CAP_MAX_VCPU_ID:
4080 r = KVM_MAX_VCPU_ID;
4081 break;
4082 case KVM_CAP_PV_MMU:
4083 r = 0;
4084 break;
4085 case KVM_CAP_MCE:
4086 r = KVM_MAX_MCE_BANKS;
4087 break;
4088 case KVM_CAP_XCRS:
4089 r = boot_cpu_has(X86_FEATURE_XSAVE);
4090 break;
4091 case KVM_CAP_TSC_CONTROL:
4092 r = kvm_has_tsc_control;
4093 break;
4094 case KVM_CAP_X2APIC_API:
4095 r = KVM_X2APIC_API_VALID_FLAGS;
4096 break;
4097 case KVM_CAP_NESTED_STATE:
4098 r = kvm_x86_ops.nested_ops->get_state ?
4099 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0;
4100 break;
4101 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
4102 r = kvm_x86_ops.enable_direct_tlbflush != NULL;
4103 break;
4104 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
4105 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL;
4106 break;
4107 case KVM_CAP_SMALLER_MAXPHYADDR:
4108 r = (int) allow_smaller_maxphyaddr;
4109 break;
4110 case KVM_CAP_STEAL_TIME:
4111 r = sched_info_on();
4112 break;
4113 case KVM_CAP_X86_BUS_LOCK_EXIT:
4114 if (kvm_has_bus_lock_exit)
4115 r = KVM_BUS_LOCK_DETECTION_OFF |
4116 KVM_BUS_LOCK_DETECTION_EXIT;
4117 else
4118 r = 0;
4119 break;
4120 default:
4121 break;
4122 }
4123 return r;
4124
4125}
4126
4127long kvm_arch_dev_ioctl(struct file *filp,
4128 unsigned int ioctl, unsigned long arg)
4129{
4130 void __user *argp = (void __user *)arg;
4131 long r;
4132
4133 switch (ioctl) {
4134 case KVM_GET_MSR_INDEX_LIST: {
4135 struct kvm_msr_list __user *user_msr_list = argp;
4136 struct kvm_msr_list msr_list;
4137 unsigned n;
4138
4139 r = -EFAULT;
4140 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
4141 goto out;
4142 n = msr_list.nmsrs;
4143 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
4144 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
4145 goto out;
4146 r = -E2BIG;
4147 if (n < msr_list.nmsrs)
4148 goto out;
4149 r = -EFAULT;
4150 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
4151 num_msrs_to_save * sizeof(u32)))
4152 goto out;
4153 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
4154 &emulated_msrs,
4155 num_emulated_msrs * sizeof(u32)))
4156 goto out;
4157 r = 0;
4158 break;
4159 }
4160 case KVM_GET_SUPPORTED_CPUID:
4161 case KVM_GET_EMULATED_CPUID: {
4162 struct kvm_cpuid2 __user *cpuid_arg = argp;
4163 struct kvm_cpuid2 cpuid;
4164
4165 r = -EFAULT;
4166 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
4167 goto out;
4168
4169 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
4170 ioctl);
4171 if (r)
4172 goto out;
4173
4174 r = -EFAULT;
4175 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
4176 goto out;
4177 r = 0;
4178 break;
4179 }
4180 case KVM_X86_GET_MCE_CAP_SUPPORTED:
4181 r = -EFAULT;
4182 if (copy_to_user(argp, &kvm_mce_cap_supported,
4183 sizeof(kvm_mce_cap_supported)))
4184 goto out;
4185 r = 0;
4186 break;
4187 case KVM_GET_MSR_FEATURE_INDEX_LIST: {
4188 struct kvm_msr_list __user *user_msr_list = argp;
4189 struct kvm_msr_list msr_list;
4190 unsigned int n;
4191
4192 r = -EFAULT;
4193 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
4194 goto out;
4195 n = msr_list.nmsrs;
4196 msr_list.nmsrs = num_msr_based_features;
4197 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
4198 goto out;
4199 r = -E2BIG;
4200 if (n < msr_list.nmsrs)
4201 goto out;
4202 r = -EFAULT;
4203 if (copy_to_user(user_msr_list->indices, &msr_based_features,
4204 num_msr_based_features * sizeof(u32)))
4205 goto out;
4206 r = 0;
4207 break;
4208 }
4209 case KVM_GET_MSRS:
4210 r = msr_io(NULL, argp, do_get_msr_feature, 1);
4211 break;
4212 case KVM_GET_SUPPORTED_HV_CPUID:
4213 r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp);
4214 break;
4215 default:
4216 r = -EINVAL;
4217 break;
4218 }
4219out:
4220 return r;
4221}
4222
4223static void wbinvd_ipi(void *garbage)
4224{
4225 wbinvd();
4226}
4227
4228static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
4229{
4230 return kvm_arch_has_noncoherent_dma(vcpu->kvm);
4231}
4232
4233void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
4234{
4235
4236 if (need_emulate_wbinvd(vcpu)) {
4237 if (static_call(kvm_x86_has_wbinvd_exit)())
4238 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
4239 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
4240 smp_call_function_single(vcpu->cpu,
4241 wbinvd_ipi, NULL, 1);
4242 }
4243
4244 static_call(kvm_x86_vcpu_load)(vcpu, cpu);
4245
4246
4247 vcpu->arch.host_pkru = read_pkru();
4248
4249
4250 if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
4251 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
4252 vcpu->arch.tsc_offset_adjustment = 0;
4253 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
4254 }
4255
4256 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) {
4257 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
4258 rdtsc() - vcpu->arch.last_host_tsc;
4259 if (tsc_delta < 0)
4260 mark_tsc_unstable("KVM discovered backwards TSC");
4261
4262 if (kvm_check_tsc_unstable()) {
4263 u64 offset = kvm_compute_l1_tsc_offset(vcpu,
4264 vcpu->arch.last_guest_tsc);
4265 kvm_vcpu_write_tsc_offset(vcpu, offset);
4266 vcpu->arch.tsc_catchup = 1;
4267 }
4268
4269 if (kvm_lapic_hv_timer_in_use(vcpu))
4270 kvm_lapic_restart_hv_timer(vcpu);
4271
4272
4273
4274
4275
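/*
 * Added note: on a host with a synchronized TSC (master clock in use)
 * there is no need to update kvmclock just because the vCPU migrated to
 * another physical CPU.
 */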
4276 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
4277 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
4278 if (vcpu->cpu != cpu)
4279 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
4280 vcpu->cpu = cpu;
4281 }
4282
4283 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
4284}
4285
4286static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
4287{
4288 struct kvm_host_map map;
4289 struct kvm_steal_time *st;
4290
4291 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
4292 return;
4293
4294 if (vcpu->arch.st.preempted)
4295 return;
4296
4297 if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
4298 &vcpu->arch.st.cache, true))
4299 return;
4300
4301 st = map.hva +
4302 offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
4303
4304 st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
4305
4306 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
4307}
4308
4309void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
4310{
4311 int idx;
4312
4313 if (vcpu->preempted && !vcpu->arch.guest_state_protected)
4314 vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
4315
4316
4317
4318
4319
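/*
 * Added note: take the srcu lock because the steal-time/runstate records
 * live in guest memory and updating them accesses the memslots.
 */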
4320 idx = srcu_read_lock(&vcpu->kvm->srcu);
4321 if (kvm_xen_msr_enabled(vcpu->kvm))
4322 kvm_xen_runstate_set_preempted(vcpu);
4323 else
4324 kvm_steal_time_set_preempted(vcpu);
4325 srcu_read_unlock(&vcpu->kvm->srcu, idx);
4326
4327 static_call(kvm_x86_vcpu_put)(vcpu);
4328 vcpu->arch.last_host_tsc = rdtsc();
4329}
4330
4331static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
4332 struct kvm_lapic_state *s)
4333{
4334 if (vcpu->arch.apicv_active)
4335 static_call(kvm_x86_sync_pir_to_irr)(vcpu);
4336
4337 return kvm_apic_get_state(vcpu, s);
4338}
4339
4340static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
4341 struct kvm_lapic_state *s)
4342{
4343 int r;
4344
4345 r = kvm_apic_set_state(vcpu, s);
4346 if (r)
4347 return r;
4348 update_cr8_intercept(vcpu);
4349
4350 return 0;
4351}
4352
4353static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
4354{
4355
4356
4357
4358
4359
4360
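/*
 * Added note: userspace interrupt injection can be accepted whenever
 * there is a place to store the vector; actual delivery happens once the
 * CPU can take the interrupt. A pending ExtINT blocks acceptance.
 */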
4361 if (kvm_cpu_has_extint(vcpu))
4362 return false;
4363
4364
4365 return (!lapic_in_kernel(vcpu) ||
4366 kvm_apic_accept_pic_intr(vcpu));
4367}
4368
4369static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
4370{
4371
4372
4373
4374
4375
4376
4377
4378 return (kvm_arch_interrupt_allowed(vcpu) &&
4379 kvm_cpu_accept_dm_intr(vcpu) &&
4380 !kvm_event_needs_reinjection(vcpu) &&
4381 !vcpu->arch.exception.pending);
4382}
4383
4384static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
4385 struct kvm_interrupt *irq)
4386{
4387 if (irq->irq >= KVM_NR_INTERRUPTS)
4388 return -EINVAL;
4389
4390 if (!irqchip_in_kernel(vcpu->kvm)) {
4391 kvm_queue_interrupt(vcpu, irq->irq, false);
4392 kvm_make_request(KVM_REQ_EVENT, vcpu);
4393 return 0;
4394 }
4395
4396
4397
4398
4399
4400 if (pic_in_kernel(vcpu->kvm))
4401 return -ENXIO;
4402
4403 if (vcpu->arch.pending_external_vector != -1)
4404 return -EEXIST;
4405
4406 vcpu->arch.pending_external_vector = irq->irq;
4407 kvm_make_request(KVM_REQ_EVENT, vcpu);
4408 return 0;
4409}
4410
4411static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
4412{
4413 kvm_inject_nmi(vcpu);
4414
4415 return 0;
4416}
4417
4418static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
4419{
4420 kvm_make_request(KVM_REQ_SMI, vcpu);
4421
4422 return 0;
4423}
4424
4425static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
4426 struct kvm_tpr_access_ctl *tac)
4427{
4428 if (tac->flags)
4429 return -EINVAL;
4430 vcpu->arch.tpr_access_reporting = !!tac->enabled;
4431 return 0;
4432}
4433
4434static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
4435 u64 mcg_cap)
4436{
4437 int r;
4438 unsigned bank_num = mcg_cap & 0xff, bank;
4439
4440 r = -EINVAL;
4441 if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
4442 goto out;
4443 if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
4444 goto out;
4445 r = 0;
4446 vcpu->arch.mcg_cap = mcg_cap;
4447
4448 if (mcg_cap & MCG_CTL_P)
4449 vcpu->arch.mcg_ctl = ~(u64)0;
4450
4451 for (bank = 0; bank < bank_num; bank++)
4452 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
4453
4454 static_call(kvm_x86_setup_mce)(vcpu);
4455out:
4456 return r;
4457}
4458
4459static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
4460 struct kvm_x86_mce *mce)
4461{
4462 u64 mcg_cap = vcpu->arch.mcg_cap;
4463 unsigned bank_num = mcg_cap & 0xff;
4464 u64 *banks = vcpu->arch.mce_banks;
4465
4466 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
4467 return -EINVAL;
4468
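 /*
  * If IA32_MCG_CTL is not all 1s, uncorrected error reporting is disabled:
  * silently drop the UC error.
  */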
4472 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
4473 vcpu->arch.mcg_ctl != ~(u64)0)
4474 return 0;
4475 banks += 4 * mce->bank;
4476
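 /*
  * If IA32_MCi_CTL is not all 1s, uncorrected error reporting is disabled
  * for this bank: silently drop the UC error.
  */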
4480 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
4481 return 0;
4482 if (mce->status & MCI_STATUS_UC) {
4483 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
4484 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
4485 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4486 return 0;
4487 }
4488 if (banks[1] & MCI_STATUS_VAL)
4489 mce->status |= MCI_STATUS_OVER;
4490 banks[2] = mce->addr;
4491 banks[3] = mce->misc;
4492 vcpu->arch.mcg_status = mce->mcg_status;
4493 banks[1] = mce->status;
4494 kvm_queue_exception(vcpu, MC_VECTOR);
4495 } else if (!(banks[1] & MCI_STATUS_VAL)
4496 || !(banks[1] & MCI_STATUS_UC)) {
4497 if (banks[1] & MCI_STATUS_VAL)
4498 mce->status |= MCI_STATUS_OVER;
4499 banks[2] = mce->addr;
4500 banks[3] = mce->misc;
4501 banks[1] = mce->status;
4502 } else
4503 banks[1] |= MCI_STATUS_OVER;
4504 return 0;
4505}
4506
4507static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
4508 struct kvm_vcpu_events *events)
4509{
4510 process_nmi(vcpu);
4511
4512 if (kvm_check_request(KVM_REQ_SMI, vcpu))
4513 process_smi(vcpu);
4514
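 /*
  * When KVM_CAP_EXCEPTION_PAYLOAD is not enabled, the legacy ABI has no way
  * to report an exception payload (e.g. the faulting address of a #PF, or
  * the DR6 bits of a #DB) to userspace.  Fold any pending payload into the
  * architectural state now so that the reported exception is self-contained.
  */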
4526 if (!vcpu->kvm->arch.exception_payload_enabled &&
4527 vcpu->arch.exception.pending && vcpu->arch.exception.has_payload)
4528 kvm_deliver_exception_payload(vcpu);
4529
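 /*
  * The ABI does not carry the instruction length of software exceptions, so
  * do not report them; as long as the guest RIP is not advanced, the
  * exception will simply be hit again.
  */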
4536 if (kvm_exception_is_soft(vcpu->arch.exception.nr)) {
4537 events->exception.injected = 0;
4538 events->exception.pending = 0;
4539 } else {
4540 events->exception.injected = vcpu->arch.exception.injected;
4541 events->exception.pending = vcpu->arch.exception.pending;
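 /*
  * For backwards compatibility, report a pending exception as injected
  * when KVM_CAP_EXCEPTION_PAYLOAD is not enabled.
  */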
4547 if (!vcpu->kvm->arch.exception_payload_enabled)
4548 events->exception.injected |=
4549 vcpu->arch.exception.pending;
4550 }
4551 events->exception.nr = vcpu->arch.exception.nr;
4552 events->exception.has_error_code = vcpu->arch.exception.has_error_code;
4553 events->exception.error_code = vcpu->arch.exception.error_code;
4554 events->exception_has_payload = vcpu->arch.exception.has_payload;
4555 events->exception_payload = vcpu->arch.exception.payload;
4556
4557 events->interrupt.injected =
4558 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
4559 events->interrupt.nr = vcpu->arch.interrupt.nr;
4560 events->interrupt.soft = 0;
4561 events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
4562
4563 events->nmi.injected = vcpu->arch.nmi_injected;
4564 events->nmi.pending = vcpu->arch.nmi_pending != 0;
4565 events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu);
4566 events->nmi.pad = 0;
4567
4568 events->sipi_vector = 0;
4569
4570 events->smi.smm = is_smm(vcpu);
4571 events->smi.pending = vcpu->arch.smi_pending;
4572 events->smi.smm_inside_nmi =
4573 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
4574 events->smi.latched_init = kvm_lapic_latched_init(vcpu);
4575
4576 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
4577 | KVM_VCPUEVENT_VALID_SHADOW
4578 | KVM_VCPUEVENT_VALID_SMM);
4579 if (vcpu->kvm->arch.exception_payload_enabled)
4580 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
4581
4582 memset(&events->reserved, 0, sizeof(events->reserved));
4583}
4584
4585static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm);
4586
4587static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
4588 struct kvm_vcpu_events *events)
4589{
4590 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
4591 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
4592 | KVM_VCPUEVENT_VALID_SHADOW
4593 | KVM_VCPUEVENT_VALID_SMM
4594 | KVM_VCPUEVENT_VALID_PAYLOAD))
4595 return -EINVAL;
4596
4597 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
4598 if (!vcpu->kvm->arch.exception_payload_enabled)
4599 return -EINVAL;
4600 if (events->exception.pending)
4601 events->exception.injected = 0;
4602 else
4603 events->exception_has_payload = 0;
4604 } else {
4605 events->exception.pending = 0;
4606 events->exception_has_payload = 0;
4607 }
4608
4609 if ((events->exception.injected || events->exception.pending) &&
4610 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
4611 return -EINVAL;
4612
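 /* INITs are latched while the vCPU is in SMM. */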
4614 if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
4615 (events->smi.smm || events->smi.pending) &&
4616 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
4617 return -EINVAL;
4618
4619 process_nmi(vcpu);
4620 vcpu->arch.exception.injected = events->exception.injected;
4621 vcpu->arch.exception.pending = events->exception.pending;
4622 vcpu->arch.exception.nr = events->exception.nr;
4623 vcpu->arch.exception.has_error_code = events->exception.has_error_code;
4624 vcpu->arch.exception.error_code = events->exception.error_code;
4625 vcpu->arch.exception.has_payload = events->exception_has_payload;
4626 vcpu->arch.exception.payload = events->exception_payload;
4627
4628 vcpu->arch.interrupt.injected = events->interrupt.injected;
4629 vcpu->arch.interrupt.nr = events->interrupt.nr;
4630 vcpu->arch.interrupt.soft = events->interrupt.soft;
4631 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
4632 static_call(kvm_x86_set_interrupt_shadow)(vcpu,
4633 events->interrupt.shadow);
4634
4635 vcpu->arch.nmi_injected = events->nmi.injected;
4636 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
4637 vcpu->arch.nmi_pending = events->nmi.pending;
4638 static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked);
4639
4640 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
4641 lapic_in_kernel(vcpu))
4642 vcpu->arch.apic->sipi_vector = events->sipi_vector;
4643
4644 if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
4645 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm)
4646 kvm_smm_changed(vcpu, events->smi.smm);
4647
4648 vcpu->arch.smi_pending = events->smi.pending;
4649
4650 if (events->smi.smm) {
4651 if (events->smi.smm_inside_nmi)
4652 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
4653 else
4654 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
4655 }
4656
4657 if (lapic_in_kernel(vcpu)) {
4658 if (events->smi.latched_init)
4659 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
4660 else
4661 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
4662 }
4663 }
4664
4665 kvm_make_request(KVM_REQ_EVENT, vcpu);
4666
4667 return 0;
4668}
4669
4670static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
4671 struct kvm_debugregs *dbgregs)
4672{
4673 unsigned long val;
4674
4675 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
4676 kvm_get_dr(vcpu, 6, &val);
4677 dbgregs->dr6 = val;
4678 dbgregs->dr7 = vcpu->arch.dr7;
4679 dbgregs->flags = 0;
4680 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
4681}
4682
4683static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
4684 struct kvm_debugregs *dbgregs)
4685{
4686 if (dbgregs->flags)
4687 return -EINVAL;
4688
4689 if (!kvm_dr6_valid(dbgregs->dr6))
4690 return -EINVAL;
4691 if (!kvm_dr7_valid(dbgregs->dr7))
4692 return -EINVAL;
4693
4694 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
4695 kvm_update_dr0123(vcpu);
4696 vcpu->arch.dr6 = dbgregs->dr6;
4697 vcpu->arch.dr7 = dbgregs->dr7;
4698 kvm_update_dr7(vcpu);
4699
4700 return 0;
4701}
4702
4703#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
4704
4705static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
4706{
4707 struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
4708 u64 xstate_bv = xsave->header.xfeatures;
4709 u64 valid;
4710
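 /*
  * Copy the legacy FP/SSE area verbatim; the loop below only deals with the
  * extended xfeatures.
  */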
4715 memcpy(dest, xsave, XSAVE_HDR_OFFSET);
4716
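 /* Report only the xfeatures the guest may actually use in XSTATE_BV. */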
4718 xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
4719 *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
4720
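 /*
  * Copy each extended region from its (possibly compacted) offset in the
  * kernel buffer to the standard, non-compacted offset expected by
  * userspace.
  */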
4725 valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
4726 while (valid) {
4727 u32 size, offset, ecx, edx;
4728 u64 xfeature_mask = valid & -valid;
4729 int xfeature_nr = fls64(xfeature_mask) - 1;
4730 void *src;
4731
4732 cpuid_count(XSTATE_CPUID, xfeature_nr,
4733 &size, &offset, &ecx, &edx);
4734
4735 if (xfeature_nr == XFEATURE_PKRU) {
4736 memcpy(dest + offset, &vcpu->arch.pkru,
4737 sizeof(vcpu->arch.pkru));
4738 } else {
4739 src = get_xsave_addr(xsave, xfeature_nr);
4740 if (src)
4741 memcpy(dest + offset, src, size);
4742 }
4743
4744 valid -= xfeature_mask;
4745 }
4746}
4747
4748static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
4749{
4750 struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
4751 u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
4752 u64 valid;
4753
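 /*
  * Copy the legacy FP/SSE area verbatim; the loop below only deals with the
  * extended xfeatures.
  */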
4758 memcpy(xsave, src, XSAVE_HDR_OFFSET);
4759
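 /* Restore XSTATE_BV; with XSAVES, mark the buffer as compacted via XCOMP_BV. */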
4761 xsave->header.xfeatures = xstate_bv;
4762 if (boot_cpu_has(X86_FEATURE_XSAVES))
4763 xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
4764
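 /*
  * Copy each extended region from its standard, non-compacted offset in the
  * userspace buffer to its (possibly compacted) offset in the kernel buffer.
  */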
4769 valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
4770 while (valid) {
4771 u32 size, offset, ecx, edx;
4772 u64 xfeature_mask = valid & -valid;
4773 int xfeature_nr = fls64(xfeature_mask) - 1;
4774
4775 cpuid_count(XSTATE_CPUID, xfeature_nr,
4776 &size, &offset, &ecx, &edx);
4777
4778 if (xfeature_nr == XFEATURE_PKRU) {
4779 memcpy(&vcpu->arch.pkru, src + offset,
4780 sizeof(vcpu->arch.pkru));
4781 } else {
4782 void *dest = get_xsave_addr(xsave, xfeature_nr);
4783
4784 if (dest)
4785 memcpy(dest, src + offset, size);
4786 }
4787
4788 valid -= xfeature_mask;
4789 }
4790}
4791
4792static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
4793 struct kvm_xsave *guest_xsave)
4794{
4795 if (!vcpu->arch.guest_fpu)
4796 return;
4797
4798 if (boot_cpu_has(X86_FEATURE_XSAVE)) {
4799 memset(guest_xsave, 0, sizeof(struct kvm_xsave));
4800 fill_xsave((u8 *) guest_xsave->region, vcpu);
4801 } else {
4802 memcpy(guest_xsave->region,
4803 &vcpu->arch.guest_fpu->state.fxsave,
4804 sizeof(struct fxregs_state));
4805 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
4806 XFEATURE_MASK_FPSSE;
4807 }
4808}
4809
4810#define XSAVE_MXCSR_OFFSET 24
4811
4812static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
4813 struct kvm_xsave *guest_xsave)
4814{
4815 u64 xstate_bv;
4816 u32 mxcsr;
4817
4818 if (!vcpu->arch.guest_fpu)
4819 return 0;
4820
4821 xstate_bv = *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
4822 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
4823
4824 if (boot_cpu_has(X86_FEATURE_XSAVE)) {
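 /*
  * Reject only xfeatures the host cannot handle and reserved MXCSR bits;
  * bits beyond the guest's CPUID are tolerated for compatibility with
  * older userspace.
  */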
4830 if (xstate_bv & ~supported_xcr0 || mxcsr & ~mxcsr_feature_mask)
4831 return -EINVAL;
4832 load_xsave(vcpu, (u8 *)guest_xsave->region);
4833 } else {
4834 if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
4835 mxcsr & ~mxcsr_feature_mask)
4836 return -EINVAL;
4837 memcpy(&vcpu->arch.guest_fpu->state.fxsave,
4838 guest_xsave->region, sizeof(struct fxregs_state));
4839 }
4840 return 0;
4841}
4842
4843static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
4844 struct kvm_xcrs *guest_xcrs)
4845{
4846 if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
4847 guest_xcrs->nr_xcrs = 0;
4848 return;
4849 }
4850
4851 guest_xcrs->nr_xcrs = 1;
4852 guest_xcrs->flags = 0;
4853 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
4854 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
4855}
4856
4857static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
4858 struct kvm_xcrs *guest_xcrs)
4859{
4860 int i, r = 0;
4861
4862 if (!boot_cpu_has(X86_FEATURE_XSAVE))
4863 return -EINVAL;
4864
4865 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
4866 return -EINVAL;
4867
4868 for (i = 0; i < guest_xcrs->nr_xcrs; i++)
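 /* Only XCR0 is currently supported. */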
4870 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
4871 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
4872 guest_xcrs->xcrs[i].value);
4873 break;
4874 }
4875 if (r)
4876 r = -EINVAL;
4877 return r;
4878}
4879
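/*
 * Tell the guest kernel, via pvclock, that it has been stopped by the host
 * (KVM_KVMCLOCK_CTRL).  Fails if the vCPU has not enabled kvmclock.
 */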
4886static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
4887{
4888 if (!vcpu->arch.pv_time_enabled)
4889 return -EINVAL;
4890 vcpu->arch.pvclock_set_guest_stopped_request = true;
4891 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
4892 return 0;
4893}
4894
4895static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4896 struct kvm_enable_cap *cap)
4897{
4898 int r;
4899 uint16_t vmcs_version;
4900 void __user *user_ptr;
4901
4902 if (cap->flags)
4903 return -EINVAL;
4904
4905 switch (cap->cap) {
4906 case KVM_CAP_HYPERV_SYNIC2:
4907 if (cap->args[0])
4908 return -EINVAL;
4909 fallthrough;
4910
4911 case KVM_CAP_HYPERV_SYNIC:
4912 if (!irqchip_in_kernel(vcpu->kvm))
4913 return -EINVAL;
4914 return kvm_hv_activate_synic(vcpu, cap->cap ==
4915 KVM_CAP_HYPERV_SYNIC2);
4916 case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
4917 if (!kvm_x86_ops.nested_ops->enable_evmcs)
4918 return -ENOTTY;
4919 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version);
4920 if (!r) {
4921 user_ptr = (void __user *)(uintptr_t)cap->args[0];
4922 if (copy_to_user(user_ptr, &vmcs_version,
4923 sizeof(vmcs_version)))
4924 r = -EFAULT;
4925 }
4926 return r;
4927 case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
4928 if (!kvm_x86_ops.enable_direct_tlbflush)
4929 return -ENOTTY;
4930
4931 return static_call(kvm_x86_enable_direct_tlbflush)(vcpu);
4932
4933 case KVM_CAP_HYPERV_ENFORCE_CPUID:
4934 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]);
4935
4936 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
4937 vcpu->arch.pv_cpuid.enforce = cap->args[0];
4938 if (vcpu->arch.pv_cpuid.enforce)
4939 kvm_update_pv_runtime(vcpu);
4940
4941 return 0;
4942 default:
4943 return -EINVAL;
4944 }
4945}
4946
4947long kvm_arch_vcpu_ioctl(struct file *filp,
4948 unsigned int ioctl, unsigned long arg)
4949{
4950 struct kvm_vcpu *vcpu = filp->private_data;
4951 void __user *argp = (void __user *)arg;
4952 int r;
4953 union {
4954 struct kvm_sregs2 *sregs2;
4955 struct kvm_lapic_state *lapic;
4956 struct kvm_xsave *xsave;
4957 struct kvm_xcrs *xcrs;
4958 void *buffer;
4959 } u;
4960
4961 vcpu_load(vcpu);
4962
4963 u.buffer = NULL;
4964 switch (ioctl) {
4965 case KVM_GET_LAPIC: {
4966 r = -EINVAL;
4967 if (!lapic_in_kernel(vcpu))
4968 goto out;
4969 u.lapic = kzalloc(sizeof(struct kvm_lapic_state),
4970 GFP_KERNEL_ACCOUNT);
4971
4972 r = -ENOMEM;
4973 if (!u.lapic)
4974 goto out;
4975 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
4976 if (r)
4977 goto out;
4978 r = -EFAULT;
4979 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
4980 goto out;
4981 r = 0;
4982 break;
4983 }
4984 case KVM_SET_LAPIC: {
4985 r = -EINVAL;
4986 if (!lapic_in_kernel(vcpu))
4987 goto out;
4988 u.lapic = memdup_user(argp, sizeof(*u.lapic));
4989 if (IS_ERR(u.lapic)) {
4990 r = PTR_ERR(u.lapic);
4991 goto out_nofree;
4992 }
4993
4994 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
4995 break;
4996 }
4997 case KVM_INTERRUPT: {
4998 struct kvm_interrupt irq;
4999
5000 r = -EFAULT;
5001 if (copy_from_user(&irq, argp, sizeof(irq)))
5002 goto out;
5003 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
5004 break;
5005 }
5006 case KVM_NMI: {
5007 r = kvm_vcpu_ioctl_nmi(vcpu);
5008 break;
5009 }
5010 case KVM_SMI: {
5011 r = kvm_vcpu_ioctl_smi(vcpu);
5012 break;
5013 }
5014 case KVM_SET_CPUID: {
5015 struct kvm_cpuid __user *cpuid_arg = argp;
5016 struct kvm_cpuid cpuid;
5017
5018 r = -EFAULT;
5019 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
5020 goto out;
5021 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
5022 break;
5023 }
5024 case KVM_SET_CPUID2: {
5025 struct kvm_cpuid2 __user *cpuid_arg = argp;
5026 struct kvm_cpuid2 cpuid;
5027
5028 r = -EFAULT;
5029 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
5030 goto out;
5031 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
5032 cpuid_arg->entries);
5033 break;
5034 }
5035 case KVM_GET_CPUID2: {
5036 struct kvm_cpuid2 __user *cpuid_arg = argp;
5037 struct kvm_cpuid2 cpuid;
5038
5039 r = -EFAULT;
5040 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
5041 goto out;
5042 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
5043 cpuid_arg->entries);
5044 if (r)
5045 goto out;
5046 r = -EFAULT;
5047 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
5048 goto out;
5049 r = 0;
5050 break;
5051 }
5052 case KVM_GET_MSRS: {
5053 int idx = srcu_read_lock(&vcpu->kvm->srcu);
5054 r = msr_io(vcpu, argp, do_get_msr, 1);
5055 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5056 break;
5057 }
5058 case KVM_SET_MSRS: {
5059 int idx = srcu_read_lock(&vcpu->kvm->srcu);
5060 r = msr_io(vcpu, argp, do_set_msr, 0);
5061 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5062 break;
5063 }
5064 case KVM_TPR_ACCESS_REPORTING: {
5065 struct kvm_tpr_access_ctl tac;
5066
5067 r = -EFAULT;
5068 if (copy_from_user(&tac, argp, sizeof(tac)))
5069 goto out;
5070 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
5071 if (r)
5072 goto out;
5073 r = -EFAULT;
5074 if (copy_to_user(argp, &tac, sizeof(tac)))
5075 goto out;
5076 r = 0;
5077 break;
5078 };
5079 case KVM_SET_VAPIC_ADDR: {
5080 struct kvm_vapic_addr va;
5081 int idx;
5082
5083 r = -EINVAL;
5084 if (!lapic_in_kernel(vcpu))
5085 goto out;
5086 r = -EFAULT;
5087 if (copy_from_user(&va, argp, sizeof(va)))
5088 goto out;
5089 idx = srcu_read_lock(&vcpu->kvm->srcu);
5090 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
5091 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5092 break;
5093 }
5094 case KVM_X86_SETUP_MCE: {
5095 u64 mcg_cap;
5096
5097 r = -EFAULT;
5098 if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap)))
5099 goto out;
5100 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
5101 break;
5102 }
5103 case KVM_X86_SET_MCE: {
5104 struct kvm_x86_mce mce;
5105
5106 r = -EFAULT;
5107 if (copy_from_user(&mce, argp, sizeof(mce)))
5108 goto out;
5109 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
5110 break;
5111 }
5112 case KVM_GET_VCPU_EVENTS: {
5113 struct kvm_vcpu_events events;
5114
5115 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
5116
5117 r = -EFAULT;
5118 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
5119 break;
5120 r = 0;
5121 break;
5122 }
5123 case KVM_SET_VCPU_EVENTS: {
5124 struct kvm_vcpu_events events;
5125
5126 r = -EFAULT;
5127 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
5128 break;
5129
5130 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
5131 break;
5132 }
5133 case KVM_GET_DEBUGREGS: {
5134 struct kvm_debugregs dbgregs;
5135
5136 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
5137
5138 r = -EFAULT;
5139 if (copy_to_user(argp, &dbgregs,
5140 sizeof(struct kvm_debugregs)))
5141 break;
5142 r = 0;
5143 break;
5144 }
5145 case KVM_SET_DEBUGREGS: {
5146 struct kvm_debugregs dbgregs;
5147
5148 r = -EFAULT;
5149 if (copy_from_user(&dbgregs, argp,
5150 sizeof(struct kvm_debugregs)))
5151 break;
5152
5153 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
5154 break;
5155 }
5156 case KVM_GET_XSAVE: {
5157 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL_ACCOUNT);
5158 r = -ENOMEM;
5159 if (!u.xsave)
5160 break;
5161
5162 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
5163
5164 r = -EFAULT;
5165 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
5166 break;
5167 r = 0;
5168 break;
5169 }
5170 case KVM_SET_XSAVE: {
5171 u.xsave = memdup_user(argp, sizeof(*u.xsave));
5172 if (IS_ERR(u.xsave)) {
5173 r = PTR_ERR(u.xsave);
5174 goto out_nofree;
5175 }
5176
5177 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
5178 break;
5179 }
5180 case KVM_GET_XCRS: {
5181 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL_ACCOUNT);
5182 r = -ENOMEM;
5183 if (!u.xcrs)
5184 break;
5185
5186 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
5187
5188 r = -EFAULT;
5189 if (copy_to_user(argp, u.xcrs,
5190 sizeof(struct kvm_xcrs)))
5191 break;
5192 r = 0;
5193 break;
5194 }
5195 case KVM_SET_XCRS: {
5196 u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
5197 if (IS_ERR(u.xcrs)) {
5198 r = PTR_ERR(u.xcrs);
5199 goto out_nofree;
5200 }
5201
5202 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
5203 break;
5204 }
5205 case KVM_SET_TSC_KHZ: {
5206 u32 user_tsc_khz;
5207
5208 r = -EINVAL;
5209 user_tsc_khz = (u32)arg;
5210
5211 if (kvm_has_tsc_control &&
5212 user_tsc_khz >= kvm_max_guest_tsc_khz)
5213 goto out;
5214
5215 if (user_tsc_khz == 0)
5216 user_tsc_khz = tsc_khz;
5217
5218 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
5219 r = 0;
5220
5221 goto out;
5222 }
5223 case KVM_GET_TSC_KHZ: {
5224 r = vcpu->arch.virtual_tsc_khz;
5225 goto out;
5226 }
5227 case KVM_KVMCLOCK_CTRL: {
5228 r = kvm_set_guest_paused(vcpu);
5229 goto out;
5230 }
5231 case KVM_ENABLE_CAP: {
5232 struct kvm_enable_cap cap;
5233
5234 r = -EFAULT;
5235 if (copy_from_user(&cap, argp, sizeof(cap)))
5236 goto out;
5237 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
5238 break;
5239 }
5240 case KVM_GET_NESTED_STATE: {
5241 struct kvm_nested_state __user *user_kvm_nested_state = argp;
5242 u32 user_data_size;
5243
5244 r = -EINVAL;
5245 if (!kvm_x86_ops.nested_ops->get_state)
5246 break;
5247
5248 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
5249 r = -EFAULT;
5250 if (get_user(user_data_size, &user_kvm_nested_state->size))
5251 break;
5252
5253 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state,
5254 user_data_size);
5255 if (r < 0)
5256 break;
5257
5258 if (r > user_data_size) {
5259 if (put_user(r, &user_kvm_nested_state->size))
5260 r = -EFAULT;
5261 else
5262 r = -E2BIG;
5263 break;
5264 }
5265
5266 r = 0;
5267 break;
5268 }
5269 case KVM_SET_NESTED_STATE: {
5270 struct kvm_nested_state __user *user_kvm_nested_state = argp;
5271 struct kvm_nested_state kvm_state;
5272 int idx;
5273
5274 r = -EINVAL;
5275 if (!kvm_x86_ops.nested_ops->set_state)
5276 break;
5277
5278 r = -EFAULT;
5279 if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
5280 break;
5281
5282 r = -EINVAL;
5283 if (kvm_state.size < sizeof(kvm_state))
5284 break;
5285
5286 if (kvm_state.flags &
5287 ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE
5288 | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING
5289 | KVM_STATE_NESTED_GIF_SET))
5290 break;
5291
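 /* nested_run_pending implies the vCPU must be in guest mode. */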
5293 if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING)
5294 && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE))
5295 break;
5296
5297 idx = srcu_read_lock(&vcpu->kvm->srcu);
5298 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state);
5299 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5300 break;
5301 }
5302 case KVM_GET_SUPPORTED_HV_CPUID:
5303 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp);
5304 break;
5305#ifdef CONFIG_KVM_XEN
5306 case KVM_XEN_VCPU_GET_ATTR: {
5307 struct kvm_xen_vcpu_attr xva;
5308
5309 r = -EFAULT;
5310 if (copy_from_user(&xva, argp, sizeof(xva)))
5311 goto out;
5312 r = kvm_xen_vcpu_get_attr(vcpu, &xva);
5313 if (!r && copy_to_user(argp, &xva, sizeof(xva)))
5314 r = -EFAULT;
5315 break;
5316 }
5317 case KVM_XEN_VCPU_SET_ATTR: {
5318 struct kvm_xen_vcpu_attr xva;
5319
5320 r = -EFAULT;
5321 if (copy_from_user(&xva, argp, sizeof(xva)))
5322 goto out;
5323 r = kvm_xen_vcpu_set_attr(vcpu, &xva);
5324 break;
5325 }
5326#endif
5327 case KVM_GET_SREGS2: {
5328 u.sregs2 = kzalloc(sizeof(struct kvm_sregs2), GFP_KERNEL);
5329 r = -ENOMEM;
5330 if (!u.sregs2)
5331 goto out;
5332 __get_sregs2(vcpu, u.sregs2);
5333 r = -EFAULT;
5334 if (copy_to_user(argp, u.sregs2, sizeof(struct kvm_sregs2)))
5335 goto out;
5336 r = 0;
5337 break;
5338 }
5339 case KVM_SET_SREGS2: {
5340 u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2));
5341 if (IS_ERR(u.sregs2)) {
5342 r = PTR_ERR(u.sregs2);
5343 u.sregs2 = NULL;
5344 goto out;
5345 }
5346 r = __set_sregs2(vcpu, u.sregs2);
5347 break;
5348 }
5349 default:
5350 r = -EINVAL;
5351 }
5352out:
5353 kfree(u.buffer);
5354out_nofree:
5355 vcpu_put(vcpu);
5356 return r;
5357}
5358
5359vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
5360{
5361 return VM_FAULT_SIGBUS;
5362}
5363
5364static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
5365{
5366 int ret;
5367
5368 if (addr > (unsigned int)(-3 * PAGE_SIZE))
5369 return -EINVAL;
5370 ret = static_call(kvm_x86_set_tss_addr)(kvm, addr);
5371 return ret;
5372}
5373
5374static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
5375 u64 ident_addr)
5376{
5377 return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr);
5378}
5379
5380static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
5381 unsigned long kvm_nr_mmu_pages)
5382{
5383 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
5384 return -EINVAL;
5385
5386 mutex_lock(&kvm->slots_lock);
5387
5388 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
5389 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
5390
5391 mutex_unlock(&kvm->slots_lock);
5392 return 0;
5393}
5394
5395static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
5396{
5397 return kvm->arch.n_max_mmu_pages;
5398}
5399
5400static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
5401{
5402 struct kvm_pic *pic = kvm->arch.vpic;
5403 int r;
5404
5405 r = 0;
5406 switch (chip->chip_id) {
5407 case KVM_IRQCHIP_PIC_MASTER:
5408 memcpy(&chip->chip.pic, &pic->pics[0],
5409 sizeof(struct kvm_pic_state));
5410 break;
5411 case KVM_IRQCHIP_PIC_SLAVE:
5412 memcpy(&chip->chip.pic, &pic->pics[1],
5413 sizeof(struct kvm_pic_state));
5414 break;
5415 case KVM_IRQCHIP_IOAPIC:
5416 kvm_get_ioapic(kvm, &chip->chip.ioapic);
5417 break;
5418 default:
5419 r = -EINVAL;
5420 break;
5421 }
5422 return r;
5423}
5424
5425static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
5426{
5427 struct kvm_pic *pic = kvm->arch.vpic;
5428 int r;
5429
5430 r = 0;
5431 switch (chip->chip_id) {
5432 case KVM_IRQCHIP_PIC_MASTER:
5433 spin_lock(&pic->lock);
5434 memcpy(&pic->pics[0], &chip->chip.pic,
5435 sizeof(struct kvm_pic_state));
5436 spin_unlock(&pic->lock);
5437 break;
5438 case KVM_IRQCHIP_PIC_SLAVE:
5439 spin_lock(&pic->lock);
5440 memcpy(&pic->pics[1], &chip->chip.pic,
5441 sizeof(struct kvm_pic_state));
5442 spin_unlock(&pic->lock);
5443 break;
5444 case KVM_IRQCHIP_IOAPIC:
5445 kvm_set_ioapic(kvm, &chip->chip.ioapic);
5446 break;
5447 default:
5448 r = -EINVAL;
5449 break;
5450 }
5451 kvm_pic_update_irq(pic);
5452 return r;
5453}
5454
5455static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
5456{
5457 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;
5458
5459 BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));
5460
5461 mutex_lock(&kps->lock);
5462 memcpy(ps, &kps->channels, sizeof(*ps));
5463 mutex_unlock(&kps->lock);
5464 return 0;
5465}
5466
5467static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
5468{
5469 int i;
5470 struct kvm_pit *pit = kvm->arch.vpit;
5471
5472 mutex_lock(&pit->pit_state.lock);
5473 memcpy(&pit->pit_state.channels, ps, sizeof(*ps));
5474 for (i = 0; i < 3; i++)
5475 kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
5476 mutex_unlock(&pit->pit_state.lock);
5477 return 0;
5478}
5479
5480static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
5481{
5482 mutex_lock(&kvm->arch.vpit->pit_state.lock);
5483 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
5484 sizeof(ps->channels));
5485 ps->flags = kvm->arch.vpit->pit_state.flags;
5486 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
5487 memset(&ps->reserved, 0, sizeof(ps->reserved));
5488 return 0;
5489}
5490
5491static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
5492{
5493 int start = 0;
5494 int i;
5495 u32 prev_legacy, cur_legacy;
5496 struct kvm_pit *pit = kvm->arch.vpit;
5497
5498 mutex_lock(&pit->pit_state.lock);
5499 prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
5500 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
5501 if (!prev_legacy && cur_legacy)
5502 start = 1;
5503 memcpy(&pit->pit_state.channels, &ps->channels,
5504 sizeof(pit->pit_state.channels));
5505 pit->pit_state.flags = ps->flags;
5506 for (i = 0; i < 3; i++)
5507 kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count,
5508 start && i == 0);
5509 mutex_unlock(&pit->pit_state.lock);
5510 return 0;
5511}
5512
5513static int kvm_vm_ioctl_reinject(struct kvm *kvm,
5514 struct kvm_reinject_control *control)
5515{
5516 struct kvm_pit *pit = kvm->arch.vpit;
5517
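 /*
  * Take pit_state.lock so that concurrent KVM_REINJECT_CONTROL callers
  * cannot leave the reinject mode half-updated.
  */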
5522 mutex_lock(&pit->pit_state.lock);
5523 kvm_pit_set_reinject(pit, control->pit_reinject);
5524 mutex_unlock(&pit->pit_state.lock);
5525
5526 return 0;
5527}
5528
5529void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
5530{
5531
5532
5533
5534
5535
5536
5537
5538