// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */
#define pr_fmt(fmt) "kvm-guest: " fmt

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <linux/syscore_ops.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
#include <asm/ptrace.h>
#include <asm/reboot.h>
#include <asm/svm.h>

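/*
 * Static key checked in the #PF entry path: when enabled, a page fault may
 * actually be an async "page not present" event injected by the host (see
 * __kvm_handle_async_pf() below).
 */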
DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

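/*
 * Queue a sleep node for @token unless a wakeup for this token has already
 * arrived (a dummy entry left behind by kvm_async_pf_task_wake()); in that
 * case consume the dummy entry and report that no wait is needed.
 */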
static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *e;

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exist -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		raw_spin_unlock(&b->lock);
		kfree(e);
		return false;
	}

	n->token = token;
	n->cpu = smp_processor_id();
	init_swait_queue_head(&n->wq);
	hlist_add_head(&n->link, &b->list);
	raw_spin_unlock(&b->lock);
	return true;
}

/*
 * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
 * @token:	Token to identify the sleep node entry
 *
 * Invoked from the async pagefault handling code or from the VM exit page
 * fault handler. In both cases RCU is watching.
 */
void kvm_async_pf_task_wait_schedule(u32 token)
{
	struct kvm_task_sleep_node n;
	DECLARE_SWAITQUEUE(wait);

	lockdep_assert_irqs_disabled();

	if (!kvm_async_pf_queue_task(token, &n))
		return;

	for (;;) {
		prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		local_irq_enable();
		schedule();
		local_irq_disable();
	}
	finish_swait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		struct kvm_task_sleep_node *n;
		struct hlist_node *p, *next;

		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			n = hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

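/*
 * Wake the task sleeping on @token, or leave a dummy node behind if the
 * wakeup arrived before the matching "page not present" event. A token of
 * ~0 broadcasts a wakeup to every sleeper queued from this CPU.
 */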
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else {
		apf_task_wake_one(n);
	}
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

noinstr u32 kvm_read_and_reset_apf_flags(void)
{
	u32 flags = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		flags = __this_cpu_read(apf_reason.flags);
		__this_cpu_write(apf_reason.flags, 0);
	}

	return flags;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);

noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	u32 flags = kvm_read_and_reset_apf_flags();
	irqentry_state_t state;

	if (!flags)
		return false;

	state = irqentry_enter(regs);
	instrumentation_begin();

	/*
	 * If the host managed to inject an async #PF into an interrupt
	 * disabled region, then things are broken, relatively speaking, at
	 * least in the sense that the halt implies interrupts are disabled
	 * on purpose.
	 */
	if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
		panic("Host injected async #PF in interrupt disabled region\n");

	if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
		if (unlikely(!(user_mode(regs))))
			panic("Host injected async #PF in kernel mode\n");
		/* Page is swapped out by the host. */
		kvm_async_pf_task_wait_schedule(token);
	} else {
		WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
	}

	instrumentation_end();
	irqentry_exit(regs, state);
	return true;
}

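/*
 * 'Page ready' notification: the host raises HYPERVISOR_CALLBACK_VECTOR once
 * a previously not-present page has been faulted in, with the token in
 * apf_reason identifying which sleeper to wake. Writing MSR_KVM_ASYNC_PF_ACK
 * tells the host that the token slot can be reused.
 */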
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	u32 token;

	ack_APIC_irq();

	inc_irq_stat(irq_hv_callback_count);

	if (__this_cpu_read(apf_reason.enabled)) {
		token = __this_cpu_read(apf_reason.token);
		kvm_async_pf_task_wake(token);
		__this_cpu_write(apf_reason.token, 0);
		wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
	}

	set_irq_regs(old_regs);
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_ops.cpu.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("stealtime: cpu %d, msr %llx\n", cpu,
		(unsigned long long) slow_virt_to_phys(st));
}

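/*
 * PV EOI: the hypervisor sets KVM_PV_EOI_BIT in this per-cpu word whenever
 * an interrupt can be acknowledged without the (trapping) APIC EOI write.
 */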
static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

static void kvm_guest_cpu_init(void)
{
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		u64 pa;

		WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));

		pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
		pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		pr_info("setup async PF for cpu %d\n", smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;

		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	pr_info("disable async PF for cpu %d\n", smp_processor_id());
}

static void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

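/*
 * Lock-free read of another vCPU's steal time. The host bumps 'version'
 * before and after it updates the record, so an odd value, or a value that
 * changed across the read, means the snapshot was torn and must be retried
 * (a seqcount-style protocol).
 */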
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory region pointed
 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variable already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

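/*
 * Undo every PV registration for this CPU (steal time, PV EOI, async #PF,
 * kvmclock) so the host stops writing to memory that is about to go away;
 * on a plain hot-unplug (!shutdown), pending async-#PF sleepers are woken.
 */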
static void kvm_guest_cpu_offline(bool shutdown)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	if (!shutdown)
		apf_task_wake_all();
	kvmclock_disable();
}

static int kvm_cpu_online(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	kvm_guest_cpu_init();
	local_irq_restore(flags);
	return 0;
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);

static bool pv_tlb_flush_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

static bool pv_ipi_supported(void)
{
	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
}

static bool pv_sched_yield_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)

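/*
 * Multicast IPIs via the KVM_HC_SEND_IPI hypercall, which takes a bitmap of
 * APIC IDs relative to 'min'. The loop keeps a sliding window of at most
 * KVM_IPI_CLUSTER_SIZE APIC IDs; whenever a destination falls outside the
 * current window, the accumulated bitmap is flushed with one hypercall and
 * a new window is started.
 */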
static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	int cpu, apic_id, icr;
	int min = 0, max = 0;
#ifdef CONFIG_X86_64
	__uint128_t ipi_bitmap = 0;
#else
	u64 ipi_bitmap = 0;
#endif
	long ret;

	if (cpumask_empty(mask))
		return;

	local_irq_save(flags);

	switch (vector) {
	default:
		icr = APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr = APIC_DM_NMI;
		break;
	}

	for_each_cpu(cpu, mask) {
		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
		if (!ipi_bitmap) {
			min = max = apic_id;
		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
			ipi_bitmap <<= min - apic_id;
			min = apic_id;
		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
			max = apic_id < max ? max : apic_id;
		} else {
			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
				  ret);
			min = max = apic_id;
			ipi_bitmap = 0;
		}
		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
	}

	if (ipi_bitmap) {
		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
		WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
			  ret);
	}

	local_irq_restore(flags);
}

static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
	__send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
	const struct cpumask *local_mask;

	cpumask_copy(new_mask, mask);
	cpumask_clear_cpu(this_cpu, new_mask);
	local_mask = new_mask;
	__send_ipi_mask(local_mask, vector);
}

/*
 * Set the IPI entry points
 */
static void kvm_setup_pv_ipi(void)
{
	apic->send_IPI_mask = kvm_send_ipi_mask;
	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
	pr_info("setup PV IPIs\n");
}

static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
{
	int cpu;

	native_send_call_func_ipi(mask);

	/* Make sure other vCPUs get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (vcpu_is_preempted(cpu)) {
			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
			break;
		}
	}
}

static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to call flush only on online vCPUs. And
	 * queue flush_on_enter for pre-empted vCPUs
	 */
	for_each_cpu(cpu, flushmask) {
		/*
		 * If the vCPU is preempted, queue the flush for the host to
		 * perform on the next VM entry by setting KVM_VCPU_FLUSH_TLB,
		 * and drop the vCPU from the IPI mask on success.
		 */
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_multi(flushmask, info);
}

static __init int kvm_alloc_cpumask(void)
{
	int cpu;

	if (!kvm_para_available() || nopv)
		return 0;

	if (pv_tlb_flush_supported() || pv_ipi_supported())
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}

	return 0;
}
arch_initcall(kvm_alloc_cpumask);

static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	kvm_guest_cpu_offline(false);
	local_irq_restore(flags);
	return 0;
}

#endif

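/*
 * Syscore suspend/resume: tear down the PV MSR registrations before the
 * system image is saved and re-register them on resume, mirroring what CPU
 * hotplug does for an individual CPU.
 */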
static int kvm_suspend(void)
{
	kvm_guest_cpu_offline(false);

	return 0;
}

static void kvm_resume(void)
{
	kvm_cpu_online(raw_smp_processor_id());
}

static struct syscore_ops kvm_syscore_ops = {
	.suspend	= kvm_suspend,
	.resume		= kvm_resume,
};

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	kvm_guest_cpu_offline(true);
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

/*
 * After a PV feature is registered, the host will keep writing to the
 * registered memory location. If the guest happens to shutdown, this memory
 * won't be valid. In cases like kexec, in which you install a new kernel, this
 * means a random memory location will be kept being written.
 */
#ifdef CONFIG_KEXEC_CORE
static void kvm_crash_shutdown(struct pt_regs *regs)
{
	kvm_guest_cpu_offline(true);
	native_machine_crash_shutdown(regs);
}
#endif

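/*
 * Main guest-side setup, run late in boot via x86_hyper_kvm: wires up the
 * paravirt callbacks (steal time, PV EOI, async #PF, PV TLB flush and IPIs)
 * according to the features the host advertises through CPUID.
 */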
static void __init kvm_guest_init(void)
{
	int i;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		static_call_update(pv_steal_clock, kvm_steal_clock);
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		static_branch_enable(&kvm_async_pf_enabled);
		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_kvm_asyncpf_interrupt);
	}

#ifdef CONFIG_SMP
	if (pv_tlb_flush_supported()) {
		pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;
		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
		pr_info("KVM setup pv remote TLB flush\n");
	}

	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (pv_sched_yield_supported()) {
		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
		pr_info("setup PV sched yield\n");
	}
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

#ifdef CONFIG_KEXEC_CORE
	machine_ops.crash_shutdown = kvm_crash_shutdown;
#endif

	register_syscore_ops(&kvm_syscore_ops);

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

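/*
 * KVM identifies itself through the hypervisor CPUID leaves: the signature
 * "KVMKVMKVM\0\0\0" is searched for starting at leaf 0x40000000, and the
 * feature and hint words live at base + KVM_CPUID_FEATURES.
 */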
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
EXPORT_SYMBOL_GPL(kvm_arch_para_hints);

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#ifdef CONFIG_SMP
	if (pv_ipi_supported())
		kvm_setup_pv_ipi();
#endif
}

static bool __init kvm_msi_ext_dest_id(void)
{
	return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID);
}

static void __init kvm_init_platform(void)
{
	kvmclock_init();
	x86_platform.apic_post_init = kvm_apic_init;
}

#if defined(CONFIG_AMD_MEM_ENCRYPT)
static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* RAX and CPL are already in the GHCB */
	ghcb_set_rbx(ghcb, regs->bx);
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
	ghcb_set_rsi(ghcb, regs->si);
}

static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* No checking of the return state needed */
	return true;
}
#endif

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name				= "KVM",
	.detect				= kvm_detect,
	.type				= X86_HYPER_KVM,
	.init.guest_late_init		= kvm_guest_init,
	.init.x2apic_available		= kvm_para_available,
	.init.msi_ext_dest_id		= kvm_msi_ext_dest_id,
	.init.init_platform		= kvm_init_platform,
#if defined(CONFIG_AMD_MEM_ENCRYPT)
	.runtime.sev_es_hcall_prepare	= kvm_sev_es_hcall_prepare,
	.runtime.sev_es_hcall_finish	= kvm_sev_es_hcall_finish,
#endif
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
	if (in_nmi())
		return;

	/*
	 * halt until it's our turn and kicked. Note that we do safe halt
	 * for irq enabled case to avoid hang when lock info is overwritten
	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
	 */
	if (irqs_disabled()) {
		if (READ_ONCE(*ptr) == val)
			halt();
	} else {
		local_irq_disable();

		/* safe_halt() will enable IRQ */
		if (READ_ONCE(*ptr) == val)
			safe_halt();
		else
			local_irq_enable();
	}
}

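/*
 * vcpu_is_preempted() backend: the host sets KVM_VCPU_PREEMPTED in the
 * per-cpu steal_time record while a vCPU is scheduled out, letting lock
 * waiters and the scheduler avoid spinning on a preempted lock holder.
 */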
#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
 * restoring to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	/*
	 * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an
	 * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is
	 * preferred over native qspinlock when vCPU is preempted.
	 */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) {
		pr_info("PV spinlocks disabled, no host support\n");
		return;
	}

	/*
	 * Disable PV spinlocks and use native qspinlock when dedicated pCPUs
	 * are available.
	 */
	if (kvm_para_has_hint(KVM_HINTS_REALTIME)) {
		pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n");
		goto out;
	}

	if (num_possible_cpus() == 1) {
		pr_info("PV spinlocks disabled, single CPU\n");
		goto out;
	}

	if (nopvspin) {
		pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n");
		goto out;
	}

	pr_info("PV spinlocks enabled\n");

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = kvm_wait;
	pv_ops.lock.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_ops.lock.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
	/*
	 * When PV spinlock is enabled which is preferred over
	 * virt_spin_lock(), virt_spin_lock_key's value is meaningless.
	 * Just disable it anyway.
	 */
out:
	static_branch_disable(&virt_spin_lock_key);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL

static void kvm_disable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 0);
}

static void kvm_enable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 1);
}

void arch_haltpoll_enable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
		pr_err_once("host does not support poll control\n");
		pr_err_once("host upgrade recommended\n");
		return;
	}

	/* Enable guest halt poll disables host halt poll */
	smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_enable);

void arch_haltpoll_disable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
		return;

	/* Disable guest halt poll enables host halt poll */
	smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
#endif	/* CONFIG_ARCH_CPUIDLE_HALTPOLL */