linux/include/linux/kvm_host.h
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * If we support unaligned MMIO, at most one fragment will be split into two:
 */
#ifdef KVM_UNALIGNED_MMIO
#  define KVM_EXTRA_MMIO_FRAGMENTS 1
#else
#  define KVM_EXTRA_MMIO_FRAGMENTS 0
#endif

#define KVM_USER_MMIO_SIZE 8

#define KVM_MAX_MMIO_FRAGMENTS \
        (KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_IMMEDIATE_EXIT    15
#define KVM_REQ_PMU               16
#define KVM_REQ_PMI               17

#define KVM_USERSPACE_IRQ_SOURCE_ID     0

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
        int                   dev_count;
        struct kvm_io_range range[];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
                    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);

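/*
 * Illustrative sketch (editor's example, not part of the original header):
 * registering an in-kernel device on the MMIO bus.  kvm_io_bus_register_dev()
 * expects the caller to hold kvm->slots_lock; the kvm_io_device here is
 * assumed to have been set up with the ops from virt/kvm/iodev.h.
 *
 *	static int example_attach(struct kvm *kvm, struct kvm_io_device *dev,
 *				  gpa_t base, int len)
 *	{
 *		int ret;
 *
 *		mutex_lock(&kvm->slots_lock);
 *		ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len, dev);
 *		mutex_unlock(&kvm->slots_lock);
 *		return ret;
 *	}
 */
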
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gva_t gva;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        struct page *page;
        bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE,
        READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
        gpa_t gpa;
        void *data;
        unsigned len;
};
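
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * how a single guest access can need more than one fragment.  With 4 KiB
 * pages, an 8-byte MMIO write to gpa 0xaffc crosses a page boundary, so the
 * emulator would describe it as two fragments handed to userspace one exit
 * at a time:
 *
 *	struct kvm_mmio_fragment frags[2] = {
 *		{ .gpa = 0xaffc, .data = buf,     .len = 4 },
 *		{ .gpa = 0xb000, .data = buf + 4, .len = 4 },
 *	};
 *
 * This is also why KVM_MAX_MMIO_FRAGMENTS reserves one extra slot when
 * KVM_UNALIGNED_MMIO is defined.
 */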

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id;
        int srcu_idx;
        int mode;
        unsigned long requests;
        unsigned long guest_debug;

        struct mutex mutex;
        struct kvm_run *run;

        int fpu_active;
        int guest_fpu_loaded, guest_xcr0_loaded;
        wait_queue_head_t wq;
        struct pid *pid;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_cur_fragment;
        int mmio_nr_fragments;
        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

        struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions cannot handle arbitrarily long bitmaps.
 * This limit must be chosen so that it does not exceed them.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long flags;
        unsigned long *rmap;
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        int user_alloc;
        int id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

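/*
 * Worked example (editor's note): the dirty bitmap has one bit per guest
 * page, rounded up to a whole number of longs.  On a 64-bit host
 * (BITS_PER_LONG == 64), a slot with npages == 100 gets
 * ALIGN(100, 64) / 8 == 128 / 8 == 16 bytes of bitmap.
 */
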
struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct msi_msg msi;
        };
        struct hlist_node link;
};

#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
        struct kvm_kernel_irq_routing_entry *rt_entries;
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi. Each entry contains the list of irq chips
         * the gsi is connected to.
         */
        struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are no longer sorted by id; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
        u64 generation;
        struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
        /* The mapping table from slot id to the index in memslots[]. */
        int id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
        spinlock_t mmu_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
#endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t        lock;
                struct list_head  items;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock and,
         * if configured, irqfds.lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
        struct hlist_head irq_ack_notifier_list;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
        long tlbs_dirty;
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)                                       \
        pr_err_ratelimited("kvm: %i: cpu%i " fmt,                       \
                           current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)

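/*
 * Illustrative sketch (editor's example, not part of the original header):
 * a typical kvm_for_each_vcpu() walk, here requesting a TLB flush on every
 * online vcpu and kicking it out of guest mode.
 *
 *	static void example_flush_all(struct kvm *kvm)
 *	{
 *		struct kvm_vcpu *vcpu;
 *		int i;
 *
 *		kvm_for_each_vcpu(i, vcpu, kvm) {
 *			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *			kvm_vcpu_kick(vcpu);
 *		}
 *	}
 */
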
#define kvm_for_each_memslot(memslot, slots)    \
        for (memslot = &slots->memslots[0];     \
              memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
                memslot++)

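/*
 * Illustrative sketch (editor's example, not part of the original header):
 * kvm_for_each_memslot() visits the used slots and stops at the first empty
 * one; callers normally hold kvm->srcu while walking the slot array.
 *
 *	static unsigned long example_total_guest_pages(struct kvm_memslots *slots)
 *	{
 *		struct kvm_memory_slot *memslot;
 *		unsigned long pages = 0;
 *
 *		kvm_for_each_memslot(memslot, slots)
 *			pages += memslot->npages;
 *		return pages;
 *	}
 */
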
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
                  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return rcu_dereference_check(kvm->memslots,
                        srcu_read_lock_held(&kvm->srcu)
                        || lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
        int index = slots->id_to_index[id];
        struct kvm_memory_slot *slot;

        slot = &slots->memslots[index];

        WARN_ON(slot->id != id);
        return slot;
}

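/*
 * Illustrative sketch (editor's example, not part of the original header):
 * kvm->memslots is SRCU-protected, so a reader takes kvm->srcu around the
 * kvm_memslots()/id_to_memslot() lookup (the slot id here is arbitrary).
 *
 *	static unsigned long example_slot_npages(struct kvm *kvm, int id)
 *	{
 *		struct kvm_memory_slot *slot;
 *		unsigned long npages;
 *		int idx;
 *
 *		idx = srcu_read_lock(&kvm->srcu);
 *		slot = id_to_memslot(kvm_memslots(kvm), id);
 *		npages = slot->npages;
 *		srcu_read_unlock(&kvm->srcu, idx);
 *		return npages;
 *	}
 */
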
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }

extern struct page *bad_page;
extern struct page *fault_page;

extern pfn_t bad_pfn;
extern pfn_t fault_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int is_hwpoison_pfn(pfn_t pfn);
int is_fault_pfn(pfn_t pfn);
int is_noslot_pfn(pfn_t pfn);
int is_invalid_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                struct kvm_memory_slot old,
                                struct kvm_userspace_memory_region *mem,
                                int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
                            int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
                         struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
                             gfn_t gfn);

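/*
 * Illustrative sketch (editor's example, not part of the original header):
 * the gfn_to_hva_cache helpers avoid repeated gfn->hva translation for a
 * guest structure that is written often.  Real users set up the cache once
 * and reuse it; it is combined into one function here only for brevity, and
 * the gpa and shared struct shown are hypothetical.
 *
 *	struct example_shared { u64 counter; };
 *
 *	static int example_publish(struct kvm *kvm, gpa_t gpa, u64 value)
 *	{
 *		struct gfn_to_hva_cache ghc;
 *		struct example_shared s = { .counter = value };
 *		int ret;
 *
 *		ret = kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa);
 *		if (ret)
 *			return ret;
 *		return kvm_write_guest_cached(kvm, &ghc, &s, sizeof(s));
 *	}
 */
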
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                        struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
        return vcpu->arch.wqp;
#else
        return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
        struct kvm_irq_ack_notifier ack_notifier;
        struct list_head list;
        int assigned_dev_id;
        int host_segnr;
        int host_busnr;
        int host_devfn;
        unsigned int entries_nr;
        int host_irq;
        bool host_irq_disabled;
        bool pci_2_3;
        struct msix_entry *host_msix_entries;
        int guest_irq;
        struct msix_entry *guest_msix_entries;
        unsigned long irq_requested_type;
        int irq_source_id;
        int flags;
        struct pci_dev *dev;
        struct kvm *kvm;
        spinlock_t intx_lock;
        spinlock_t intx_mask_lock;
        char irq_name[32];
        struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);

#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
                                   union kvm_ioapic_redirect_entry *entry,
                                   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For kvm->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY       0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      struct kvm_memory_slot *slot)
{
        return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
                                         struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
        return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
                struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
                struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
        BUG_ON(preemptible());
        account_system_vtime(current);
        current->flags |= PF_VCPU;
        /* KVM does not hold any references to RCU-protected data when it
         * switches the CPU into guest mode. In fact, switching to guest mode
         * is very similar to exiting to userspace from the RCU point of view.
         * In addition, the CPU may stay in guest mode for quite a long time
         * (up to one time slice). Let's treat guest mode as a quiescent state,
         * just like we do with user-mode execution.
         */
        rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
        account_system_vtime(current);
        current->flags &= ~PF_VCPU;
}

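/*
 * Illustrative sketch (editor's example, not part of the original header):
 * the usual shape of a vcpu run loop around these helpers.  Preemption is
 * disabled across guest entry, kvm_guest_enter() marks the quiescent state
 * described above, and kvm_guest_exit() undoes the accounting after the
 * hardware exit.  run_guest_once() is a hypothetical stand-in for the
 * arch-specific entry hook.
 *
 *	preempt_disable();
 *	local_irq_disable();
 *	vcpu->mode = IN_GUEST_MODE;
 *	kvm_guest_enter();
 *	run_guest_once(vcpu);
 *	vcpu->mode = OUTSIDE_GUEST_MODE;
 *	local_irq_enable();
 *	kvm_guest_exit();
 *	preempt_enable();
 */
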
/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        kvm_for_each_memslot(memslot, slots)
                if (gfn >= memslot->base_gfn &&
                      gfn < memslot->base_gfn + memslot->npages)
                        return memslot;

        return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
        return search_memslots(slots, gfn);
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

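/*
 * Worked example (editor's note): gfn_to_index() gives the position of a gfn
 * within a slot at a given large-page granularity.  Assuming x86, where
 * KVM_HPAGE_GFN_SHIFT() is 9 for 2 MiB pages, a slot with base_gfn 0x400 and
 * gfn 0xa00 yields (0xa00 >> 9) - (0x400 >> 9) == 5 - 2 == 3, i.e. the fourth
 * 2 MiB region of the slot.
 */
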
static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
                                               gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}

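/*
 * Worked example (editor's note): with 4 KiB pages (PAGE_SHIFT == 12),
 * gfn_to_gpa(0x1234) == 0x1234000 and gpa_to_gfn(0x1234567) == 0x1234;
 * the low 12 bits of a gpa are the offset within the page.
 */
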
static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
        if (unlikely(vcpu->kvm->mmu_notifier_count))
                return 1;
        /*
         * Ensure the read of mmu_notifier_count happens before the read
         * of mmu_notifier_seq.  This interacts with the smp_wmb() in
         * mmu_notifier_invalidate_range_end to make sure that the caller
         * either sees the old (non-zero) value of mmu_notifier_count or
         * the new (incremented) value of mmu_notifier_seq.
         * PowerPC Book3s HV KVM calls this under a per-page lock
         * rather than under kvm->mmu_lock, for scalability, so it
         * can't rely on kvm->mmu_lock to keep things ordered.
         */
        smp_rmb();
        if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif
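
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * the canonical way arch fault handlers use mmu_notifier_retry().  The
 * sequence count is sampled before the (sleepable) host page lookup; after
 * reacquiring kvm->mmu_lock, the fault is retried if an invalidation ran in
 * between, so a stale pfn is never installed in the shadow page tables.
 *
 *	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(vcpu->kvm, gfn);	(may sleep)
 *
 *	spin_lock(&vcpu->kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq)) {
 *		spin_unlock(&vcpu->kvm->mmu_lock);
 *		kvm_release_pfn_clean(pfn);
 *		goto retry;
 *	}
 *	... install the mapping ...
 *	spin_unlock(&vcpu->kvm->mmu_lock);
 */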

#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
                                          struct kvm_irq_routing_table *irq_rt)
{
        rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                                unsigned long arg)
{
        return -ENOTTY;
}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (test_bit(req, &vcpu->requests)) {
                clear_bit(req, &vcpu->requests);
                return true;
        } else {
                return false;
        }
}

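/*
 * Illustrative sketch (editor's example, not part of the original header):
 * the request/kick pattern.  A producer sets a request bit and kicks the
 * vcpu so it leaves guest mode; the vcpu's run loop then consumes the bit
 * with kvm_check_request() before the next guest entry.  The handler named
 * update_guest_clock() is hypothetical.
 *
 *	Producer (any context):
 *		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 *		kvm_vcpu_kick(vcpu);
 *
 *	Consumer (vcpu run loop, before entering the guest):
 *		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu))
 *			update_guest_clock(vcpu);
 */
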
#endif