/* linux/include/linux/kvm_host.h */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/swait.h>
#include <linux/refcount.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MAX_VCPU_ID
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
#endif

/*
 * Bits 16-31 of kvm_memory_region::flags are used internally by KVM;
 * the other bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID     (1UL << 16)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS  2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM   1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero, so we can mask
 * bits 62..52 to indicate an error pfn and bit 63 to indicate a noslot
 * pfn.
 */
#define KVM_PFN_ERR_MASK        (0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
#define KVM_PFN_NOSLOT          (0x1ULL << 63)

#define KVM_PFN_ERR_FAULT       (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON    (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT    (KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but could not be
 * translated to a pfn on the host.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * Error_noslot pfns indicate that the gfn cannot be translated to a
 * pfn: either it is not in any slot, or the translation to a pfn
 * failed.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
        return pfn == KVM_PFN_NOSLOT;
}

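/*
 * Illustrative sketch (not part of this header): classifying the result
 * of a gfn->pfn translation with the predicates above.  The error codes
 * chosen here are an assumption for the example.
 */
static inline int example_classify_pfn(kvm_pfn_t pfn)
{
        if (is_noslot_pfn(pfn))
                return -ENOENT;         /* gfn is not backed by any memslot */
        if (is_error_pfn(pfn))
                return -EFAULT;         /* in a slot, but host translation failed */
        return 0;                       /* usable pfn */
}
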
/*
 * Architectures whose KVM_HVA_ERR_BAD is something other than
 * PAGE_OFFSET (e.g. s390) provide their own defines and
 * kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD         (PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD      (PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
        return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE    (ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
        return IS_ERR(page);
}

#define KVM_REQUEST_MASK           GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP      BIT(8)
#define KVM_REQUEST_WAIT           BIT(9)
/*
 * Architecture-independent vcpu->requests bit members
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH         (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_RELOAD        (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_PENDING_TIMER     2
#define KVM_REQ_UNHALT            3

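/*
 * Worked example (illustrative): KVM_REQ_TLB_FLUSH sets request bit 0 in
 * vcpu->requests; the KVM_REQUEST_WAIT and KVM_REQUEST_NO_WAKEUP
 * modifiers lie outside KVM_REQUEST_MASK, so they only influence how
 * kvm_make_all_cpus_request() kicks and waits for target vcpus, not
 * which bit gets set.
 */
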
#define KVM_USERSPACE_IRQ_SOURCE_ID             0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID        1

extern struct kmem_cache *kvm_vcpu_cache;

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
        int dev_count;
        int ioeventfd_count;
        struct kvm_io_range range[];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_VIRTIO_CCW_NOTIFY_BUS,
        KVM_FAST_MMIO_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
                            gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                               struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                                         gpa_t addr);

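/*
 * Illustrative note: a device model registers on one of the buses above
 * with kvm_io_bus_register_dev(); vcpu accesses are then dispatched to
 * it by address and length through kvm_io_bus_write()/kvm_io_bus_read().
 */
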
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gva_t gva;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        bool   wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
                       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE,
        READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
        gpa_t gpa;
        void *data;
        unsigned len;
};

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id;
        int srcu_idx;
        int mode;
        unsigned long requests;
        unsigned long guest_debug;

        int pre_pcpu;
        struct list_head blocked_vcpu_list;

        struct mutex mutex;
        struct kvm_run *run;

        int guest_fpu_loaded, guest_xcr0_loaded;
        struct swait_queue_head wq;
        struct pid *pid;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;
        unsigned int halt_poll_ns;
        bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_cur_fragment;
        int mmio_nr_fragments;
        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
        /*
         * CPU-relax-intercept / pause-loop-exit optimization.
         * in_spin_loop: set when a vcpu takes a pause-loop exit or a
         *  cpu-relax intercept.
         * dy_eligible: indicates whether the vcpu is eligible for
         *  directed yield.
         */
        struct {
                bool in_spin_loop;
                bool dy_eligible;
        } spin_loop;
#endif
        bool preempted;
        struct kvm_vcpu_arch arch;
        struct dentry *debugfs_dentry;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        /*
         * The memory barrier ensures a previous write to vcpu->requests cannot
         * be reordered with the read of vcpu->mode.  It pairs with the general
         * memory barrier following the write of vcpu->mode in VCPU RUN.
         */
        smp_mb__before_atomic();
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support overly long bitmaps;
 * this number must be chosen so as not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        u32 flags;
        short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

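/*
 * Worked example (illustrative): a 3-page memslot rounds up to
 * BITS_PER_LONG bits, i.e. 8 bytes of dirty bitmap on a 64-bit host.
 */
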
struct kvm_s390_adapter_int {
        u64 ind_addr;
        u64 summary_addr;
        u64 ind_offset;
        u32 summary_offset;
        u32 adapter_id;
};

struct kvm_hv_sint {
        u32 vcpu;
        u32 sint;
};

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level,
                   bool line_status);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct {
                        u32 address_lo;
                        u32 address_hi;
                        u32 data;
                        u32 flags;
                        u32 devid;
                } msi;
                struct kvm_s390_adapter_int adapter;
                struct kvm_hv_sint hv_sint;
        };
        struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi. Each entry contains list of irq chips
         * the gsi is connected to.
         */
        struct hlist_head map[0];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
        return 0;
}
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
        u64 generation;
        struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
        /* The mapping table from slot id to the index in memslots[]. */
        short id_to_index[KVM_MEM_SLOTS_NUM];
        atomic_t lru_slot;
        int used_slots;
};

struct kvm {
        spinlock_t mmu_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

        /*
         * created_vcpus is protected by kvm->lock, and is incremented
         * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
         * incremented after storing the kvm_vcpu pointer in vcpus,
         * and is accessed atomically.
         */
        atomic_t online_vcpus;
        int created_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t        lock;
                struct list_head  items;
                struct list_head  resampler_list;
                struct mutex      resampler_lock;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
        struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
        long tlbs_dirty;
        struct list_head devices;
        struct dentry *debugfs_dentry;
        struct kvm_stat_data **debugfs_stat_data;
        struct srcu_struct srcu;
        struct srcu_struct irq_srcu;
};

#define kvm_err(fmt, ...) \
        pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
        pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
        pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
        pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
                             ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm [%i]: " fmt, \
                           task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)                                     \
        kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt,                  \
                        (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...)                                      \
        kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...)                          \
        kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id,           \
                              ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...)                                        \
        kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
         * the caller has read kvm->online_vcpus before (as is the case
         * for kvm_for_each_vcpu, for example).
         */
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)

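/*
 * Illustrative sketch (not part of the API): counting online vCPUs with
 * kvm_for_each_vcpu.  The loop bounds itself by kvm->online_vcpus, so it
 * never sees a vcpus[] entry before its creation is complete.
 */
static inline int example_count_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        int i, n = 0;

        kvm_for_each_vcpu(i, vcpu, kvm)
                n++;
        return n;
}
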
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
        struct kvm_vcpu *vcpu = NULL;
        int i;

        if (id < 0)
                return NULL;
        if (id < KVM_MAX_VCPUS)
                vcpu = kvm_get_vcpu(kvm, id);
        if (vcpu && vcpu->vcpu_id == id)
                return vcpu;
        kvm_for_each_vcpu(i, vcpu, kvm)
                if (vcpu->vcpu_id == id)
                        return vcpu;
        return NULL;
}

static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu *tmp;
        int idx;

        kvm_for_each_vcpu(idx, tmp, vcpu->kvm)
                if (tmp == vcpu)
                        return idx;
        BUG();
}

#define kvm_for_each_memslot(memslot, slots)    \
        for (memslot = &slots->memslots[0];     \
              memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
                memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
        return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
                  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
        return rcu_dereference_check(kvm->memslots[as_id],
                        srcu_read_lock_held(&kvm->srcu)
                        || lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return __kvm_memslots(kvm, 0);
}

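/*
 * Illustrative note: as the rcu_dereference_check() above encodes,
 * readers are expected to be inside a kvm->srcu read-side section (or
 * hold slots_lock) before using the result, e.g.:
 *
 *      idx = srcu_read_lock(&kvm->srcu);
 *      slots = kvm_memslots(kvm);
 *      ... use slots ...
 *      srcu_read_unlock(&kvm->srcu, idx);
 */
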
static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
        int as_id = kvm_arch_vcpu_memslots_id(vcpu);

        return __kvm_memslots(vcpu->kvm, as_id);
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
        int index = slots->id_to_index[id];
        struct kvm_memory_slot *slot;

        slot = &slots->memslots[index];

        WARN_ON(slot->id != id);
        return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
        KVM_MR_CREATE,
        KVM_MR_DELETE,
        KVM_MR_MOVE,
        KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
                          const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
                            const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                const struct kvm_userspace_memory_region *mem,
                                enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old,
                                const struct kvm_memory_slot *new,
                                enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
                            struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
                                      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
                               bool atomic, bool *async, bool write_fault,
                               bool *writable);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, int offset, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
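
/*
 * Illustrative sketch (not part of the API): repeated guest writes to a
 * fixed gpa go through a gfn_to_hva_cache so the memslot lookup is done
 * once.  The helper name and error handling are assumptions for the
 * example.
 */
static inline int example_write_cached(struct kvm *kvm,
                                       struct gfn_to_hva_cache *ghc,
                                       gpa_t gpa, u64 value)
{
        int r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(value));

        if (r)
                return r;
        return kvm_write_guest_cached(kvm, ghc, &value, sizeof(value));
}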
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
                             int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
                               unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
                        unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
                              int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
                         unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                        struct kvm_dirty_log *log, int *is_dirty);

int kvm_get_dirty_log_protect(struct kvm *kvm,
                        struct kvm_dirty_log *log, bool *is_dirty);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                                        struct kvm_memory_slot *slot,
                                        gfn_t gfn_offset,
                                        unsigned long mask);

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                struct kvm_dirty_log *log);

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                        bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

bool kvm_arch_has_vcpu_debugfs(void);
int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
        return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
        return false;
}
#endif

static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
        return vcpu->arch.wqp;
#else
        return &vcpu->wq;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * Returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQs.  On some architectures the virtual
 * interrupt controller is dynamically instantiated, so this is not
 * always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
        return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
                    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
                               struct kvm *kvm, int irq_source_id,
                               int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
        int start = 0, end = slots->used_slots;
        int slot = atomic_read(&slots->lru_slot);
        struct kvm_memory_slot *memslots = slots->memslots;

        /* Fast path: try the most-recently-used slot first. */
        if (gfn >= memslots[slot].base_gfn &&
            gfn < memslots[slot].base_gfn + memslots[slot].npages)
                return &memslots[slot];

        /* Binary search; memslots[] is kept sorted by base_gfn, descending. */
        while (start < end) {
                slot = start + (end - start) / 2;

                if (gfn >= memslots[slot].base_gfn)
                        end = slot;
                else
                        start = slot + 1;
        }

        if (gfn >= memslots[start].base_gfn &&
            gfn < memslots[start].base_gfn + memslots[start].npages) {
                atomic_set(&slots->lru_slot, start);
                return &memslots[start];
        }

        return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
        return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
        gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

        return slot->base_gfn + gfn_offset;
}

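/*
 * Worked example (illustrative): for a slot with base_gfn 0x100 and
 * userspace_addr 0x7f0000000000, __gfn_to_hva_memslot() maps gfn 0x102
 * to hva 0x7f0000000000 + 2 * PAGE_SIZE, and hva_to_gfn_memslot()
 * inverts that mapping.
 */
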
static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

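/*
 * Worked example (illustrative): with 4KiB pages (PAGE_SHIFT == 12),
 * gfn 0x100 corresponds to gpa 0x100000; gpa_to_gfn() discards any
 * offset within the page.
 */
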
static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
        unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

        return kvm_is_error_hva(hva);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stat_data {
        int offset;
        struct kvm *kvm;
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
        if (unlikely(kvm->mmu_notifier_count))
                return 1;
        /*
         * Ensure the read of mmu_notifier_count happens before the read
         * of mmu_notifier_seq.  This interacts with the smp_wmb() in
         * mmu_notifier_invalidate_range_end to make sure that the caller
         * either sees the old (non-zero) value of mmu_notifier_count or
         * the new (incremented) value of mmu_notifier_seq.
         * PowerPC Book3s HV KVM calls this under a per-page lock
         * rather than under kvm->mmu_lock, for scalability, so
         * can't rely on kvm->mmu_lock to keep things ordered.
         */
        smp_rmb();
        if (kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif
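
/*
 * Typical caller pattern (illustrative): sample mmu_notifier_seq before
 * the gfn->pfn translation, then recheck under mmu_lock before
 * installing the mapping:
 *
 *      mmu_seq = kvm->mmu_notifier_seq;
 *      smp_rmb();
 *      pfn = gfn_to_pfn(kvm, gfn);
 *      spin_lock(&kvm->mmu_lock);
 *      if (mmu_notifier_retry(kvm, mmu_seq))
 *              goto retry;
 */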

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#ifdef CONFIG_S390
#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
#elif defined(CONFIG_ARM64)
#define KVM_MAX_IRQ_ROUTES 4096
#else
#define KVM_MAX_IRQ_ROUTES 1024
#endif

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
                          struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif
void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        /*
         * Ensure the rest of the request is published to kvm_check_request's
         * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
         */
        smp_wmb();
        set_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
        return test_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
        clear_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (kvm_test_request(req, vcpu)) {
                kvm_clear_request(req, vcpu);

                /*
                 * Ensure the rest of the request is visible to kvm_check_request's
                 * caller.  Paired with the smp_wmb in kvm_make_request.
                 */
                smp_mb__after_atomic();
                return true;
        } else {
                return false;
        }
}

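/*
 * Illustrative sketch (not part of the API): how a requester and a vCPU
 * run loop typically pair up.  The requester sets a request bit and
 * kicks the vCPU out of guest mode; the run loop consumes the bit with
 * kvm_check_request(), which clears it and orders subsequent reads
 * after the test.
 */
static inline void example_request_tlb_flush(struct kvm_vcpu *vcpu)
{
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        kvm_vcpu_kick(vcpu);            /* force an exit from guest mode */
}

static inline bool example_saw_unhalt(struct kvm_vcpu *vcpu)
{
        /* kvm_check_request() tests and clears the bit in one step. */
        return kvm_check_request(KVM_REQ_UNHALT, vcpu);
}
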
extern bool kvm_rebooting;

extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_shrink;

struct kvm_device {
        struct kvm_device_ops *ops;
        struct kvm *kvm;
        void *private;
        struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
        const char *name;

        /*
         * create is called holding kvm->lock and any operations not suitable
         * to do while holding the lock should be deferred to init (see
         * below).
         */
        int (*create)(struct kvm_device *dev, u32 type);

        /*
         * init is called after create if create is successful and is called
         * outside of holding kvm->lock.
         */
        void (*init)(struct kvm_device *dev);

        /*
         * Destroy is responsible for freeing dev.
         *
         * Destroy may be called before or after destructors are called
         * on emulated I/O regions, depending on whether a reference is
         * held by a vcpu or other kvm component that gets destroyed
         * after the emulated I/O.
         */
        void (*destroy)(struct kvm_device *dev);

        int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
                      unsigned long arg);
};

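/*
 * Shape of a minimal kvm_device_ops implementation (illustrative only;
 * the names below are invented for the example):
 *
 *      static int example_create(struct kvm_device *dev, u32 type)
 *      {
 *              return 0;       // runs under kvm->lock; defer heavy work to ->init
 *      }
 *
 *      static void example_destroy(struct kvm_device *dev)
 *      {
 *              kfree(dev);     // destroy must free dev itself
 *      }
 *
 *      static struct kvm_device_ops example_ops = {
 *              .name = "example",
 *              .create = example_create,
 *              .destroy = example_destroy,
 *      };
 *
 * The ops would then be registered once with kvm_register_device_ops().
 */
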
void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
                           struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
                           struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
                                  uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wake up during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
        return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
        return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

#endif