linux/arch/x86/kvm/x86.h
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

/* Drop any exception queued for injection into the guest. */
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.exception.pending = false;
}

/*
 * Queue an interrupt for injection into the guest; @soft marks
 * software interrupts (e.g. ones raised by emulating the INTn
 * instruction).
 */
static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
        bool soft)
{
        vcpu->arch.interrupt.pending = true;
        vcpu->arch.interrupt.soft = soft;
        vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.interrupt.pending = false;
}

/* True if a queued exception, interrupt or NMI still needs reinjection. */
static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
                vcpu->arch.nmi_injected;
}

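/*
 * Usage sketch (illustrative only; the real callers are the vendor
 * exit handlers, e.g. vmx_complete_interrupts): after a VM-exit that
 * interrupted event delivery, the pending event is re-queued so the
 * next VM-entry can reinject it:
 *
 *      kvm_clear_interrupt_queue(vcpu);
 *      if (vectoring_info_valid)
 *              kvm_queue_interrupt(vcpu, vector, is_soft);
 *
 * Here vectoring_info_valid, vector and is_soft are placeholders for
 * fields decoded from the hardware's exit information.
 */
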
/* #BP (INT3) and #OF (INTO) are the software-generated exceptions. */
static inline bool kvm_exception_is_soft(unsigned int nr)
{
        return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

/* Guest is in protected mode if CR0.PE is set. */
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

/* Guest is in 64-bit (long) mode if EFER.LMA is set. */
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return vcpu->arch.efer & EFER_LMA;
#else
        return 0;
#endif
}

/* True when guest page-table walks go through the nested MMU. */
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

/* Physical Address Extension: CR4.PAE. */
static inline int is_pae(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

/* Page Size Extension: CR4.PSE. */
static inline int is_pse(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

/* Guest paging is enabled when CR0.PG is set. */
static inline int is_paging(struct kvm_vcpu *vcpu)
{
        return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

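/*
 * Taken together, these predicates determine the guest's paging mode.
 * A sketch of the decision (illustrative; the authoritative logic is
 * the MMU setup code in arch/x86/kvm/mmu.c):
 *
 *      if (!is_paging(vcpu))
 *              nonpaging mode
 *      else if (is_long_mode(vcpu))
 *              4-level (64-bit) paging
 *      else if (is_pae(vcpu))
 *              PAE (3-level) paging
 *      else
 *              legacy 32-bit (2-level) paging
 */
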
/* Return a 32-bit mask with bit (bitno mod 32) set. */
static inline u32 bit(int bitno)
{
        return 1 << (bitno & 31);
}

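/*
 * Example (illustrative): CPUID feature tests mask a 32-bit register
 * word with bit(). The X86_FEATURE_* numbers are defined per 32-bit
 * word, so only the low five bits of the feature number matter here,
 * e.g. for CPUID.1:ECX bit 26:
 *
 *      best->ecx & bit(X86_FEATURE_XSAVE)
 */
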
/* Remember the gva->gfn translation of the last emulated MMIO access. */
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
                                        gva_t gva, gfn_t gfn, unsigned access)
{
        vcpu->arch.mmio_gva = gva & PAGE_MASK;
        vcpu->arch.access = access;
        vcpu->arch.mmio_gfn = gfn;
}

/*
 * Clear the mmio cache info for the given gva; in particular, if gva
 * is ~0ul, clear all mmio cache info.
 */
static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
        if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
                return;

        vcpu->arch.mmio_gva = 0;
}

/* True if gva falls within the page of the cached MMIO access. */
static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
        if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
                return true;

        return false;
}

/* True if gpa falls within the frame of the cached MMIO access. */
static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
                return true;

        return false;
}

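/*
 * Sketch of the intended flow (illustrative only; the real fast path
 * lives in the emulator/MMU code): on an MMIO page fault the emulator
 * caches the translation with vcpu_cache_mmio_info(); a later emulated
 * access first tries vcpu_match_mmio_gva()/vcpu_match_mmio_gpa() and,
 * on a hit, reuses the cached gfn instead of walking the guest page
 * tables again.
 */
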
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);

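/*
 * Usage sketch (illustrative): emulator callbacks read and write
 * guest-virtual memory through these helpers; on failure the
 * x86_exception is filled in so the caller can inject the fault, e.g.:
 *
 *      struct x86_exception e;
 *      if (kvm_read_guest_virt(ctxt, addr, val, bytes, &e) !=
 *          X86EMUL_CONTINUE)
 *              propagate/inject the fault described by e;
 */
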
extern u64 host_xcr0;

extern struct static_key kvm_no_apic_vcpu;

#endif /* ARCH_X86_KVM_X86_H */