linux/arch/x86/include/asm/kvm_para.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <asm/processor.h>
#include <asm/alternative.h>
#include <linux/interrupt.h>
#include <uapi/asm/kvm_para.h>

#ifdef CONFIG_KVM_GUEST
bool kvm_check_and_clear_guest_paused(void);
#else
static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}
#endif /* CONFIG_KVM_GUEST */

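/*
 * ALTERNATIVE() emits Intel's "vmcall" by default and patches it to AMD's
 * "vmmcall" at boot when the CPU advertises X86_FEATURE_VMMCALL; both
 * encodings are three bytes, so the replacement fits in place.
 */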
#define KVM_HYPERCALL \
	ALTERNATIVE("vmcall", "vmmcall", X86_FEATURE_VMMCALL)

/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
 * instruction.  The hypervisor may replace it with something else but only the
 * instructions above are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
 * The hypercall number should be placed in rax and the return value will be
 * placed in rax.  No other registers will be clobbered unless explicitly
 * noted by the particular hypercall.
 */

static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
		     : "memory");
	return ret;
}

#ifdef CONFIG_KVM_GUEST
void kvmclock_init(void);
void kvmclock_disable(void);
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
unsigned int kvm_arch_para_hints(void);
void kvm_async_pf_task_wait_schedule(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_apf_flags(void);
bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);

DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);

static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	if (static_branch_unlikely(&kvm_async_pf_enabled))
		return __kvm_handle_async_pf(regs, token);
	else
		return false;
}
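
/*
 * Illustrative sketch only (not part of the original header): the guest
 * page-fault path is expected to offer the fault to the async-PF machinery
 * before normal handling, roughly as below (modelled on the check done in
 * arch/x86/mm/fault.c, where 'address' is the faulting CR2 value carrying
 * the token).
 */
#if 0
	if (kvm_handle_async_pf(regs, (u32)address))
		return;	/* async-PF token handled; skip normal fault handling */
#endif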

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init kvm_spinlock_init(void);
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static inline void kvm_spinlock_init(void)
{
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_KVM_GUEST */
#define kvm_async_pf_task_wait_schedule(T) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)

static inline bool kvm_para_available(void)
{
	return false;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}

static inline unsigned int kvm_arch_para_hints(void)
{
	return 0;
}

static inline u32 kvm_read_and_reset_apf_flags(void)
{
	return 0;
}

static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	return false;
}
#endif /* CONFIG_KVM_GUEST */

#endif /* _ASM_X86_KVM_PARA_H */