// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation: guest-side support for KVM
 * paravirtual features such as async page faults, steal time
 * accounting, PV EOI, PV IPIs, PV remote TLB flush and PV spinlocks.
 */
#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>

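/*
 * Command-line switches: "no-kvmapf" disables async page fault handling,
 * "no-steal-acc" keeps the scheduler from consuming steal time accounting
 * while still exposing the steal clock itself.
 */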
static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM.
 */
static void kvm_io_delay(void)
{
}

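/*
 * Tasks sleeping on an async "page not present" token are hashed by
 * token into async_pf_sleepers, so the later "page ready" notification
 * can find and wake the matching sleeper.
 */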
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

/*
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 *		      (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) ||
		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
		    ? preempt_count() > 1 || rcu_preempt_depth()
		    : interrupt_kernel);
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		rcu_irq_exit();

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}

		rcu_irq_enter();
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

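/*
 * A sleeper that could schedule is woken through its swait queue; a vCPU
 * that halted in kvm_async_pf_task_wait() needs a reschedule IPI instead.
 */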
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

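/*
 * Handle a "page ready" notification. A token of ~0 is a broadcast that
 * wakes every sleeper belonging to this CPU.
 */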
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

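/*
 * #PF entry point when async PF is enabled. A zero reason falls through
 * to the regular page fault handler; otherwise the CR2 value carries an
 * async PF token rather than a faulting address.
 */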
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code, address);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		kvm_async_pf_task_wait((u32)address, !user_mode(regs));
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		kvm_async_pf_task_wake((u32)address);
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_ops.cpu.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/**
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

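/*
 * Per-CPU guest registration: hand the host the physical addresses of
 * this CPU's apf_reason, PV EOI flag and steal_time areas. The low bits
 * of each address double as enable/feature flags.
 */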
static void kvm_guest_cpu_init(void)
{
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPTION
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		pa |= KVM_ASYNC_PF_ENABLED;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO "KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;

		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

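/*
 * steal_time is written by the host under an even/odd version protocol:
 * an odd version means an update is in flight, so retry until two reads
 * of the version agree and the value is even.
 */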
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory region pointed
 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variable already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

static bool pv_tlb_flush_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);

#ifdef CONFIG_SMP

static bool pv_ipi_supported(void)
{
	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
}

static bool pv_sched_yield_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)

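/*
 * KVM_HC_SEND_IPI takes a 128-bit destination bitmap (passed as two
 * longs) plus the APIC ID corresponding to bit 0, so a single hypercall
 * can target a cluster of up to 2 * BITS_PER_LONG contiguous APIC IDs.
 */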
static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	int cpu, apic_id, icr;
	int min = 0, max = 0;
#ifdef CONFIG_X86_64
	__uint128_t ipi_bitmap = 0;
#else
	u64 ipi_bitmap = 0;
#endif
	long ret;

	if (cpumask_empty(mask))
		return;

	local_irq_save(flags);

	switch (vector) {
	default:
		icr = APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr = APIC_DM_NMI;
		break;
	}

	for_each_cpu(cpu, mask) {
		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
		if (!ipi_bitmap) {
			min = max = apic_id;
		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
			ipi_bitmap <<= min - apic_id;
			min = apic_id;
		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
			max = apic_id < max ? max : apic_id;
		} else {
			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
			min = max = apic_id;
			ipi_bitmap = 0;
		}
		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
	}

	if (ipi_bitmap) {
		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
		WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
	}

	local_irq_restore(flags);
}

static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
	__send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
	const struct cpumask *local_mask;

	cpumask_copy(new_mask, mask);
	cpumask_clear_cpu(this_cpu, new_mask);
	local_mask = new_mask;
	__send_ipi_mask(local_mask, vector);
}

/*
 * Set the IPI entry points
 */
static void kvm_setup_pv_ipi(void)
{
	apic->send_IPI_mask = kvm_send_ipi_mask;
	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
	pr_info("KVM setup pv IPIs\n");
}

static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
{
	int cpu;

	native_send_call_func_ipi(mask);

	/* Make sure other vCPUs get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (vcpu_is_preempted(cpu)) {
			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
			break;
		}
	}
}

static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		static_branch_disable(&virt_spin_lock_key);
}

static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif

static void __init kvm_apf_trap_init(void)
{
	update_intr_gate(X86_TRAP_PF, async_page_fault);
}

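/*
 * PV remote TLB flush: vCPUs that the host has preempted are dropped
 * from the IPI mask; setting KVM_VCPU_FLUSH_TLB makes the host perform
 * their flush when the vCPU next runs.
 */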
static void kvm_flush_tlb_others(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to call flush only on online vCPUs. And
	 * queue flush_on_enter for pre-empted vCPUs
	 */
	for_each_cpu(cpu, flushmask) {
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_others(flushmask, info);
}

static void __init kvm_guest_init(void)
{
	int i;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_ops.time.steal_clock = kvm_steal_clock;
	}

	if (pv_tlb_flush_supported()) {
		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
		pr_info("KVM setup pv remote TLB flush\n");
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (pv_sched_yield_supported()) {
		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
		pr_info("KVM setup pv sched yield\n");
	}
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives on the hardlockup detector when a vCPU is
	 * descheduled by the host for a long stretch.
	 */
	hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
EXPORT_SYMBOL_GPL(kvm_arch_para_hints);

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#if defined(CONFIG_SMP)
	if (pv_ipi_supported())
		kvm_setup_pv_ipi();
#endif
}

static void __init kvm_init_platform(void)
{
	kvmclock_init();
	x86_platform.apic_post_init = kvm_apic_init;
}

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.type			= X86_HYPER_KVM,
	.init.guest_late_init	= kvm_guest_init,
	.init.x2apic_available	= kvm_para_available,
	.init.init_platform	= kvm_init_platform,
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

static __init int kvm_alloc_cpumask(void)
{
	int cpu;
	bool alloc = false;

	if (!kvm_para_available() || nopv)
		return 0;

	if (pv_tlb_flush_supported())
		alloc = true;

#if defined(CONFIG_SMP)
	if (pv_ipi_supported())
		alloc = true;
#endif

	if (alloc)
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}

	return 0;
}
arch_initcall(kvm_alloc_cpumask);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

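/*
 * Wait for a spinlock byte to change: halt the current vCPU and release
 * it back to the host until it is kicked or an interrupt arrives.
 */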
static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * halt until it's our turn and kicked. Note that we do safe halt
	 * for irq enabled case to avoid hang when lock info is overwritten
	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
 * restoring to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		return;

	/* Don't use the pvqspinlock code if there is only 1 vCPU. */
	if (num_possible_cpus() == 1)
		return;

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = kvm_wait;
	pv_ops.lock.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_ops.lock.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL

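/*
 * MSR_KVM_POLL_CONTROL bit 0 toggles host-side halt polling: the guest
 * clears it while polling in idle itself, so host and guest do not both
 * burn CPU waiting for the same wakeup.
 */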
static void kvm_disable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 0);
}

static void kvm_enable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 1);
}

void arch_haltpoll_enable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
		pr_err_once("kvm: host does not support poll control\n");
		pr_err_once("kvm: host upgrade recommended\n");
		return;
	}

	/* Enable guest halt poll disables host halt poll */
	smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_enable);

void arch_haltpoll_disable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
		return;

	/* Disable guest halt poll enables host halt poll */
	smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
#endif	/* CONFIG_ARCH_CPUIDLE_HALTPOLL */