// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
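
/*
 * Look up the sleep node for @token in bucket @b.  All callers hold
 * b->lock while walking the list.
 */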
static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

/*
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 *		      (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exist -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) ||
		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
		    ? preempt_count() > 1 || rcu_preempt_depth()
		    : interrupt_kernel);
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		rcu_irq_exit();

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}

		rcu_irq_enter();
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
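
/*
 * Fetch and clear the per-CPU reason word that the host writes into
 * apf_reason, so each async-PF notification is consumed exactly once.
 */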
u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
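
/*
 * Async-PF aware #PF entry point: a reason of 0 means an ordinary page
 * fault, otherwise the host is telling us the faulting page is either
 * swapped out (PAGE_NOT_PRESENT) or available again (PAGE_READY).
 */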
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code, address);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		kvm_async_pf_task_wait((u32)address, !user_mode(regs));
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		kvm_async_pf_task_wake((u32)address);
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_ops.cpu.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}
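
/*
 * Advertise the location of this CPU's steal time area to the host by
 * writing its physical address, with the enable bit set, into
 * MSR_KVM_STEAL_TIME.
 */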
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/**
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}
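
/*
 * Per-CPU guest setup: register the async-PF shared slot, the PV EOI flag
 * word and the steal time area with the hypervisor, for each feature it
 * advertises.
 */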
static void kvm_guest_cpu_init(void)
{
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		pa |= KVM_ASYNC_PF_ENABLED;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;

		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};
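
/*
 * Steal time is published by the host with an even/odd version protocol:
 * retry the read if the version is odd (an update is in progress) or has
 * changed under us, akin to a seqcount.
 */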
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory region pointed
 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variable already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

#ifdef CONFIG_SMP
#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)
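
/*
 * Send IPIs via the KVM_HC_SEND_IPI hypercall.  Destination APIC IDs are
 * batched into a 128-bit bitmap anchored at @min; whenever the next APIC
 * ID does not fit in the current cluster, the pending bitmap is flushed
 * with a hypercall and a new cluster is started.
 */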
static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	int cpu, apic_id, icr;
	int min = 0, max = 0;
#ifdef CONFIG_X86_64
	__uint128_t ipi_bitmap = 0;
#else
	u64 ipi_bitmap = 0;
#endif
	long ret;

	if (cpumask_empty(mask))
		return;

	local_irq_save(flags);

	switch (vector) {
	default:
		icr = APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr = APIC_DM_NMI;
		break;
	}

	for_each_cpu(cpu, mask) {
		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
		if (!ipi_bitmap) {
			min = max = apic_id;
		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
			ipi_bitmap <<= min - apic_id;
			min = apic_id;
		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
			max = apic_id < max ? max : apic_id;
		} else {
			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
			min = max = apic_id;
			ipi_bitmap = 0;
		}
		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
	}

	if (ipi_bitmap) {
		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
		WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
	}

	local_irq_restore(flags);
}

static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
	__send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpumask new_mask;
	const struct cpumask *local_mask;

	cpumask_copy(&new_mask, mask);
	cpumask_clear_cpu(this_cpu, &new_mask);
	local_mask = &new_mask;
	__send_ipi_mask(local_mask, vector);
}

static void kvm_send_ipi_allbutself(int vector)
{
	kvm_send_ipi_mask_allbutself(cpu_online_mask, vector);
}

static void kvm_send_ipi_all(int vector)
{
	__send_ipi_mask(cpu_online_mask, vector);
}

/*
 * Set the IPI entry points
 */
static void kvm_setup_pv_ipi(void)
{
	apic->send_IPI_mask = kvm_send_ipi_mask;
	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
	apic->send_IPI_allbutself = kvm_send_ipi_allbutself;
	apic->send_IPI_all = kvm_send_ipi_all;
	pr_info("KVM setup pv IPIs\n");
}

static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
{
	int cpu;

	native_send_call_func_ipi(mask);

	/* Make sure other vCPUs get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (vcpu_is_preempted(cpu)) {
			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
			break;
		}
	}
}

static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		static_branch_disable(&virt_spin_lock_key);
}

static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif

static void __init kvm_apf_trap_init(void)
{
	update_intr_gate(X86_TRAP_PF, async_page_fault);
}

static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
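
/*
 * PV TLB flush: instead of IPIing preempted vCPUs, mark them with
 * KVM_VCPU_FLUSH_TLB in their steal time record and let the host flush
 * their TLB when they are next scheduled in; only the remaining CPUs are
 * flushed the native way.
 */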
static void kvm_flush_tlb_others(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to call flush only on online vCPUs. And
	 * queue flush_on_enter for pre-empted vCPUs
	 */
	for_each_cpu(cpu, flushmask) {
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_others(flushmask, info);
}

static void __init kvm_guest_init(void)
{
	int i;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_ops.time.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
		pr_info("KVM setup pv sched yield\n");
	}
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives on hard lockups if the host is overcommitted.
	 */
	hardlockup_detector_disable();
}
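
/*
 * Probe for KVM's CPUID signature ("KVMKVMKVM") in the hypervisor leaf
 * range; returns the base leaf, or 0 when not running on KVM.
 */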
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#if defined(CONFIG_SMP)
	if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
		kvm_setup_pv_ipi();
#endif
}

static void __init kvm_init_platform(void)
{
	kvmclock_init();
	x86_platform.apic_post_init = kvm_apic_init;
}

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.type			= X86_HYPER_KVM,
	.init.guest_late_init	= kvm_guest_init,
	.init.x2apic_available	= kvm_para_available,
	.init.init_platform	= kvm_init_platform,
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);
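
/*
 * Allocate the per-CPU masks used by kvm_flush_tlb_others() when the PV
 * TLB flush conditions (feature present, no realtime hint, steal time
 * available) hold.
 */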
static __init int kvm_setup_pv_tlb_flush(void)
{
	int cpu;

	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}
		pr_info("KVM setup pv remote TLB flush\n");
	}

	return 0;
}
arch_initcall(kvm_setup_pv_tlb_flush);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>
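
/*
 * PV wait: halt this vCPU while *ptr still holds @val, until a kick or
 * interrupt arrives.  Bail out from NMI context, where halting is not
 * safe.
 */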
static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * halt until it's our turn and kicked. Note that we do safe halt
	 * for irq enabled case to avoid hang when lock info is overwritten
	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}
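
/*
 * vcpu_is_preempted() backend: the 32-bit variant is plain C; 64-bit uses
 * the hand-written callee-saved thunk below to keep this hot path cheap.
 */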
#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
 * restoring to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		return;

	/* Don't use the pvqspinlock code if there is only 1 vCPU. */
	if (num_possible_cpus() == 1)
		return;

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = kvm_wait;
	pv_ops.lock.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_ops.lock.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */