linux/include/linux/kvm_host.h
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12

#define KVM_USERSPACE_IRQ_SOURCE_ID     0

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
        int                   dev_count;
#define NR_IOBUS_DEVS 200
        struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
                    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                            struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);
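
/*
 * Usage sketch (hypothetical caller, not part of this header): a device
 * model registers itself on a bus, typically under slots_lock, and
 * guest accesses are then routed through kvm_io_bus_write() /
 * kvm_io_bus_read().  "dev" is an assumed, already-initialized
 * struct kvm_io_device.
 *
 *        mutex_lock(&kvm->slots_lock);
 *        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dev);
 *        mutex_unlock(&kvm->slots_lock);
 *
 *        (later, emulating a 4-byte guest store)
 *        if (kvm_io_bus_write(kvm, KVM_MMIO_BUS, addr, 4, &val) < 0)
 *                ... no device claimed the address ...
 */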

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gva_t gva;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        struct page *page;
        bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
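
/*
 * Flow sketch (illustrative): when a guest page is not resident and
 * the guest can tolerate a delayed fault, the arch fault handler
 * queues an async fault and re-enters the guest; completions are
 * reaped from the vcpu run loop.  can_do_async_pf() stands in for an
 * arch-specific policy check and is not declared here.
 *
 *        if (can_do_async_pf(vcpu))
 *                kvm_setup_async_pf(vcpu, gva, gfn, &arch);
 *        ...
 *        kvm_check_async_pf_completion(vcpu);        (each loop pass)
 */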

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int vcpu_id;
        struct mutex mutex;
        int   cpu;
        atomic_t guest_mode;
        struct kvm_run *run;
        unsigned long requests;
        unsigned long guest_debug;
        int srcu_idx;

        int fpu_active;
        int guest_fpu_loaded, guest_xcr0_loaded;
        wait_queue_head_t wq;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_size;
        unsigned char mmio_data[8];
        gpa_t mmio_phys_addr;
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

        struct kvm_vcpu_arch arch;
};

/*
 * Some of the bitops functions do not support overly long bitmaps;
 * this number is chosen so that we stay within those limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_lpage_info {
        unsigned long rmap_pde;
        int write_count;
};

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long flags;
        unsigned long *rmap;
        unsigned long *dirty_bitmap;
        unsigned long *dirty_bitmap_head;
        struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
        unsigned long userspace_addr;
        int user_alloc;
        int id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
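
/*
 * Worked example (illustrative): for a slot of 1000 pages on a 64-bit
 * host, ALIGN(1000, 64) = 1024 bits, i.e. 1024 / 8 = 128 bytes.
 * Rounding npages up to BITS_PER_LONG keeps the bitmap a whole number
 * of longs, which the bitops helpers expect.
 */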

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct msi_msg msi;
        };
        struct hlist_node link;
};

#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
        struct kvm_kernel_irq_routing_entry *rt_entries;
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi.  Each entry contains a list of the irq
         * chips that the gsi is connected to.
         */
        struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

struct kvm_memslots {
        int nmemslots;
        u64 generation;
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
                                        KVM_PRIVATE_MEM_SLOTS];
};

struct kvm {
        spinlock_t mmu_lock;
        raw_spinlock_t requests_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
        struct kvm_vcpu *bsp_vcpu;
#endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t        lock;
                struct list_head  items;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock and,
         * if configured, irqfds.lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
        struct hlist_head irq_ack_notifier_list;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
        long tlbs_dirty;
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)                                       \
 do {                                                                   \
        if (printk_ratelimit())                                         \
                printk(KERN_ERR "kvm: %i: cpu%i " fmt,                  \
                       current->tgid, (vcpu)->vcpu_id, ## __VA_ARGS__); \
 } while (0)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \
             idx < atomic_read(&kvm->online_vcpus) && vcpup; \
             vcpup = kvm_get_vcpu(kvm, ++idx))
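
/*
 * Usage sketch (illustrative): kick every online vcpu.  The smp_rmb()
 * in kvm_get_vcpu() is presumably paired with a write barrier in the
 * vcpu creation path, so a vcpu counted in online_vcpus is fully
 * initialized by the time it is dereferenced here.
 *
 *        int idx;
 *        struct kvm_vcpu *v;
 *
 *        kvm_for_each_vcpu(idx, v, kvm)
 *                kvm_vcpu_kick(v);
 */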

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
                  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return rcu_dereference_check(kvm->memslots,
                        srcu_read_lock_held(&kvm->srcu)
                        || lockdep_is_held(&kvm->slots_lock));
}
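
/*
 * Readers of the memslot array run under SRCU (illustrative sketch):
 *
 *        int idx = srcu_read_lock(&kvm->srcu);
 *        struct kvm_memslots *slots = kvm_memslots(kvm);
 *        ... walk slots->memslots[0..slots->nmemslots-1] ...
 *        srcu_read_unlock(&kvm->srcu, idx);
 *
 * Writers publish a new array under slots_lock and synchronize SRCU
 * before freeing the old one.
 */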

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }

extern struct page *bad_page;
extern pfn_t bad_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int is_hwpoison_pfn(pfn_t pfn);
int is_fault_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                struct kvm_memory_slot old,
                                struct kvm_userspace_memory_region *mem,
                                int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
                            int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);
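
/*
 * Reference discipline sketch (illustrative): gfn_to_page() returns a
 * page with an elevated refcount (bad_page on failure), so every path
 * must release it, marking it dirty if it was written.
 *
 *        page = gfn_to_page(kvm, gfn);
 *        if (is_error_page(page)) {
 *                kvm_release_page_clean(page);
 *                return -EFAULT;
 *        }
 *        ... read or write the page contents ...
 *        kvm_release_page_dirty(page);
 */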

pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
                         struct kvm_memory_slot *slot, gfn_t gfn);
int memslot_id(struct kvm *kvm, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa);
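
/*
 * Usage sketch (hypothetical caller): writing a guest-visible
 * structure "st" at a fixed gpa through a gfn_to_hva_cache, which
 * avoids redoing the gfn->hva translation on every write.
 *
 *        struct gfn_to_hva_cache ghc;
 *
 *        if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa))
 *                return -EFAULT;
 *        if (kvm_write_guest_cached(kvm, &ghc, &st, sizeof(st)))
 *                return -EFAULT;
 */
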
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
                             gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                        struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif

int kvm_arch_init_vm(struct kvm *kvm);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
        struct kvm_irq_ack_notifier ack_notifier;
        struct list_head list;
        int assigned_dev_id;
        int host_segnr;
        int host_busnr;
        int host_devfn;
        unsigned int entries_nr;
        int host_irq;
        bool host_irq_disabled;
        struct msix_entry *host_msix_entries;
        int guest_irq;
        struct msix_entry *guest_msix_entries;
        unsigned long irq_requested_type;
        int irq_source_id;
        int flags;
        struct pci_dev *dev;
        struct kvm *kvm;
        spinlock_t intx_lock;
        char irq_name[32];
};

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);
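
/*
 * Sketch (hypothetical device model, not part of this header): to
 * learn when the guest masks or unmasks an interrupt line, register a
 * notifier keyed by the IRQ.  foo_mask_notify() is an assumed helper.
 *
 *        static void foo_mask_notify(struct kvm_irq_mask_notifier *kimn,
 *                                    bool masked)
 *        {
 *                ... react to the mask state change ...
 *        }
 *
 *        kimn->func = foo_mask_notify;
 *        kvm_register_irq_mask_notifier(kvm, irq, kimn);
 */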

#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
                                   union kvm_ioapic_redirect_entry *entry,
                                   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY       0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      struct kvm_memory_slot *slot)
{
        return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
        return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
                struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
                struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
        account_system_vtime(current);
        current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
        account_system_vtime(current);
        current->flags &= ~PF_VCPU;
}
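
/*
 * Bracketing sketch (illustrative): the arch run loop wraps the actual
 * guest entry with these helpers, so vtime accumulated up to the entry
 * is accounted as system time, and ticks that arrive while PF_VCPU is
 * set are accounted as guest time.
 *
 *        kvm_guest_enter();
 *        ... arch-specific world switch into the guest ...
 *        kvm_guest_exit();
 */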

static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
                                               gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}
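
/*
 * Worked example (illustrative, assuming 4 KiB pages, PAGE_SHIFT = 12):
 * gfn_to_gpa(0x1234) == 0x1234000, and gpa_to_gfn(0x1234abc) == 0x1234;
 * the low PAGE_SHIFT bits of a gpa are the offset within the page, so
 * gfn_to_gpa(gpa_to_gfn(gpa)) rounds gpa down to a page boundary.
 */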

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
        if (unlikely(vcpu->kvm->mmu_notifier_count))
                return 1;
        /*
         * Both reads happen under mmu_lock and both values are
         * modified under mmu_lock, so there is no need for an
         * smp_rmb() in between; otherwise mmu_notifier_count would
         * have to be read before mmu_notifier_seq.  See the write
         * side in mmu_notifier_invalidate_range_end.
         */
        if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif
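
/*
 * Typical use (illustrative, modeled on the arch page-fault paths):
 * sample the sequence count before translating the gfn, then recheck
 * under mmu_lock before committing the new mapping.
 *
 *        mmu_seq = vcpu->kvm->mmu_notifier_seq;
 *        smp_rmb();
 *        pfn = gfn_to_pfn(vcpu->kvm, gfn);
 *        spin_lock(&vcpu->kvm->mmu_lock);
 *        if (mmu_notifier_retry(vcpu, mmu_seq))
 *                goto out_unlock;        (drop the pfn, retry the fault)
 *        ... install the mapping ...
 */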

#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
                                          struct kvm_irq_routing_table *irq_rt)
{
        rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}
#endif

#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                                unsigned long arg)
{
        return -ENOTTY;
}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        set_bit(req, &vcpu->requests);
}

static inline bool kvm_make_check_request(int req, struct kvm_vcpu *vcpu)
{
        return test_and_set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (test_bit(req, &vcpu->requests)) {
                clear_bit(req, &vcpu->requests);
                return true;
        } else {
                return false;
        }
}
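
/*
 * Protocol sketch (illustrative): a producer raises a request bit and
 * kicks the vcpu out of guest mode; the vcpu run loop consumes it with
 * kvm_check_request(), which tests and clears in one step.
 *
 *        producer:
 *                kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 *                kvm_vcpu_kick(vcpu);
 *
 *        vcpu run loop:
 *                if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu))
 *                        ... refresh the guest clock before entry ...
 */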

#endif