linux/include/linux/kvm_host.h
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
#include <linux/kvm_dirty_ring.h>

#ifndef KVM_MAX_VCPU_IDS
#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
#endif

/*
 * Bits 16 ~ 31 of kvm_memory_region::flags are used internally by KVM;
 * the remaining bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID     (1UL << 16)

/*
 * Bit 63 of the memslot generation number is an "update in-progress flag",
 * e.g. is temporarily set for the duration of install_new_memslots().
 * This flag effectively creates a unique generation number that is used to
 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
 * i.e. may (or may not) have come from the previous memslots generation.
 *
 * This is necessary because the actual memslots update is not atomic with
 * respect to the generation number update.  Updating the generation number
 * first would allow a vCPU to cache a spte from the old memslots using the
 * new generation number, and updating the generation number after switching
 * to the new memslots would allow cache hits using the old generation number
 * to reference the defunct memslots.
 *
 * This mechanism is used to prevent getting hits in KVM's caches while a
 * memslot update is in-progress, and to prevent cache hits *after* updating
 * the actual generation number against accesses that were inserted into the
 * cache *before* the memslots were updated.
 */
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS      BIT_ULL(63)

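/*
 * Illustrative sketch (not part of this header): a consumer caching
 * memslot-derived state can treat the update-in-progress bit as an
 * automatic generation mismatch, so stale data is never used while an
 * update is underway. The helper below is hypothetical.
 *
 *	static bool example_cache_is_stale(struct kvm *kvm, u64 cached_gen)
 *	{
 *		u64 gen = kvm_memslots(kvm)->generation;
 *
 *		return cached_gen != gen ||
 *		       (gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
 *	}
 */
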
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS  2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM   1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask bits 62 ~ 52 to indicate an error pfn,
 * and bit 63 to indicate a no-slot pfn.
 */
#define KVM_PFN_ERR_MASK        (0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
#define KVM_PFN_NOSLOT          (0x1ULL << 63)

#define KVM_PFN_ERR_FAULT       (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON    (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT    (KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but the host failed to
 * translate it to a pfn.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * Error_noslot pfns indicate that the gfn cannot be translated to a pfn:
 * either it is not in a slot, or the translation to a pfn failed.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in a slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
        return pfn == KVM_PFN_NOSLOT;
}

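/*
 * Illustrative sketch: a caller translating a gfn typically distinguishes
 * the failure classes like this (the error values chosen are arbitrary):
 *
 *	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_noslot_pfn(pfn))
 *		return -ENOENT;		// gfn is not backed by any memslot
 *	if (is_error_pfn(pfn))
 *		return -EFAULT;		// slot exists, host translation failed
 *	// pfn is valid from here on
 */
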
/*
 * Architectures whose KVM_HVA_ERR_BAD is something other than PAGE_OFFSET
 * (e.g. s390) provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD         (PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD      (PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
        return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE    (ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
        return IS_ERR(page);
}

#define KVM_REQUEST_MASK           GENMASK(7, 0)
#define KVM_REQUEST_NO_WAKEUP      BIT(8)
#define KVM_REQUEST_WAIT           BIT(9)
/*
 * Architecture-independent vcpu->requests bit members
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH         (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_RELOAD        (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK           2
#define KVM_REQ_UNHALT            3
#define KVM_REQ_VM_DEAD           (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQUEST_ARCH_BASE     8

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
        BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
        (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr)           KVM_ARCH_REQ_FLAGS(nr, 0)

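/*
 * Illustrative sketch: an architecture defines its own requests on top of
 * KVM_REQUEST_ARCH_BASE, e.g. (hypothetical request):
 *
 *	#define KVM_REQ_EXAMPLE_EVENT \
 *		KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 *
 * kvm_make_all_cpus_request(kvm, KVM_REQ_EXAMPLE_EVENT) then sets the
 * request bit on every vCPU, kicks those in guest mode and, because of
 * KVM_REQUEST_WAIT, waits for the resulting IPIs to be serviced.
 */
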
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
                                 unsigned long *vcpu_bitmap);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
                                      struct kvm_vcpu *except);
bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
                                unsigned long *vcpu_bitmap);

#define KVM_USERSPACE_IRQ_SOURCE_ID             0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID        1

extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
        int dev_count;
        int ioeventfd_count;
        struct kvm_io_range range[];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_VIRTIO_CCW_NOTIFY_BUS,
        KVM_FAST_MMIO_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
                            gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                                         gpa_t addr);

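/*
 * Illustrative sketch: registering a 4-byte MMIO handler at a hypothetical
 * guest physical address EXAMPLE_GPA. The implementation in
 * virt/kvm/kvm_main.c expects kvm->slots_lock to be held.
 *
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, EXAMPLE_GPA, 4,
 *				      &mydev->io_dev);
 *	mutex_unlock(&kvm->slots_lock);
 */
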
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gpa_t cr2_or_gpa;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        bool   wakeup_all;
        bool notpresent_injected;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                        unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
struct kvm_gfn_range {
        struct kvm_memory_slot *slot;
        gfn_t start;
        gfn_t end;
        pte_t pte;
        bool may_block;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE,
        READING_SHADOW_PAGE_TABLES,
};

#define KVM_UNMAPPED_PAGE       ((void *) 0x500 + POISON_POINTER_DELTA)

struct kvm_host_map {
        /*
         * Only valid if the pfn is managed by the host kernel (i.e. there
         * is a 'struct page' for it; with the mem= kernel parameter, some
         * memory can be used as guest memory without being managed by the
         * host kernel).
         * If the pfn is not managed by the host kernel, this field is
         * initialized to KVM_UNMAPPED_PAGE.
         */
        struct page *page;
        void *hva;
        kvm_pfn_t pfn;
        kvm_pfn_t gfn;
};

/*
 * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
 * directly to check for that.
 */
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
        return !!map->hva;
}

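/*
 * Illustrative sketch: temporarily mapping a guest page for host access
 * via kvm_vcpu_map()/kvm_vcpu_unmap() (declared later in this header).
 * Note that despite the declared parameter name, the implementation in
 * virt/kvm/kvm_main.c takes a gfn; 'gpa', 'data' and 'len' below are
 * hypothetical, and the copy must not cross a page boundary.
 *
 *	struct kvm_host_map map;
 *
 *	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
 *		return -EFAULT;
 *	memcpy(map.hva + offset_in_page(gpa), data, len);
 *	kvm_vcpu_unmap(vcpu, &map, true);	// dirty = true
 */
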
static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
{
        return single_task_running() && !need_resched() && ktime_before(cur, stop);
}

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
        gpa_t gpa;
        void *data;
        unsigned len;
};

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id; /* id given by userspace at creation */
        int vcpu_idx; /* index in kvm->vcpus array */
        int srcu_idx;
        int mode;
        u64 requests;
        unsigned long guest_debug;

        int pre_pcpu;
        struct list_head blocked_vcpu_list;

        struct mutex mutex;
        struct kvm_run *run;

        struct rcuwait wait;
        struct pid __rcu *pid;
        int sigset_active;
        sigset_t sigset;
        unsigned int halt_poll_ns;
        bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_cur_fragment;
        int mmio_nr_fragments;
        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
        /*
         * CPU relax intercept or pause loop exit optimization
         * in_spin_loop: set when a vCPU does a pause loop exit
         *  or a CPU relax intercept.
         * dy_eligible: indicates whether the vCPU is eligible for directed yield.
         */
        struct {
                bool in_spin_loop;
                bool dy_eligible;
        } spin_loop;
#endif
        bool preempted;
        bool ready;
        struct kvm_vcpu_arch arch;
        struct kvm_vcpu_stat stat;
        char stats_id[KVM_STATS_NAME_SIZE];
        struct kvm_dirty_ring dirty_ring;

        /*
         * The index of the most recently used memslot by this vCPU. It's ok
         * if this becomes stale due to memslot changes since we always check
         * it is a valid slot.
         */
        int last_used_slot;
};

/* must be called with irqs disabled */
static __always_inline void guest_enter_irqoff(void)
{
        /*
         * This is running in ioctl context, so it's safe to assume that
         * it's the stime pending cputime to flush.
         */
        instrumentation_begin();
        vtime_account_guest_enter();
        instrumentation_end();

        /*
         * KVM does not hold any references to RCU-protected data when it
         * switches a CPU into guest mode. In fact, switching to guest mode
         * is very similar to exiting to userspace from an RCU point of view.
         * In addition, a CPU may stay in guest mode for quite a long time
         * (up to one time slice). Let's treat guest mode as a quiescent
         * state, just like we do with user-mode execution.
         */
        if (!context_tracking_guest_enter()) {
                instrumentation_begin();
                rcu_virt_note_context_switch(smp_processor_id());
                instrumentation_end();
        }
}

static __always_inline void guest_exit_irqoff(void)
{
        context_tracking_guest_exit();

        instrumentation_begin();
        /* Flush the guest cputime we spent on the guest */
        vtime_account_guest_exit();
        instrumentation_end();
}

static inline void guest_exit(void)
{
        unsigned long flags;

        local_irq_save(flags);
        guest_exit_irqoff();
        local_irq_restore(flags);
}

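/*
 * Illustrative sketch of the expected call pattern around guest entry
 * (heavily simplified; real arch code does much more between these calls):
 *
 *	local_irq_disable();
 *	guest_enter_irqoff();
 *	// ... arch-specific world switch into and out of the guest ...
 *	guest_exit_irqoff();
 *	local_irq_enable();
 */
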
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        /*
         * The memory barrier ensures a previous write to vcpu->requests cannot
         * be reordered with the read of vcpu->mode.  It pairs with the general
         * memory barrier following the write of vcpu->mode in VCPU RUN.
         */
        smp_mb__before_atomic();
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support overly long bitmaps.
 * This number must be chosen so as not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        u32 flags;
        short id;
        u16 as_id;
};

static inline bool kvm_slot_dirty_track_enabled(struct kvm_memory_slot *slot)
{
        return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
}

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
        unsigned long len = kvm_dirty_bitmap_bytes(memslot);

        return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}

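/*
 * Worked example: a 1 GiB slot of 4 KiB pages has npages = 262144, so each
 * bitmap is ALIGN(262144, BITS_PER_LONG) / 8 = 32768 bytes. KVM allocates
 * two such bitmaps back to back, and kvm_second_dirty_bitmap() returns a
 * pointer to the second one.
 */
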
#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
#endif

struct kvm_s390_adapter_int {
        u64 ind_addr;
        u64 summary_addr;
        u64 ind_offset;
        u32 summary_offset;
        u32 adapter_id;
};

struct kvm_hv_sint {
        u32 vcpu;
        u32 sint;
};

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level,
                   bool line_status);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct {
                        u32 address_lo;
                        u32 address_hi;
                        u32 data;
                        u32 flags;
                        u32 devid;
                } msi;
                struct kvm_s390_adapter_int adapter;
                struct kvm_hv_sint hv_sint;
        };
        struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi. Each entry contains a list of the irq chips
         * the gsi is connected to.
         */
        struct hlist_head map[];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#define KVM_MEM_SLOTS_NUM SHRT_MAX
#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS)

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
        return 0;
}
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
        u64 generation;
        /* The mapping table from slot id to the index in memslots[]. */
        short id_to_index[KVM_MEM_SLOTS_NUM];
        atomic_t last_used_slot;
        int used_slots;
        struct kvm_memory_slot memslots[];
};

struct kvm {
#ifdef KVM_HAVE_MMU_RWLOCK
        rwlock_t mmu_lock;
#else
        spinlock_t mmu_lock;
#endif /* KVM_HAVE_MMU_RWLOCK */

        struct mutex slots_lock;

        /*
         * Protects the arch-specific fields of struct kvm_memory_slot in
         * use by the VM. To be used under the slots_lock (above) or in a
         * kvm->srcu critical section where acquiring the slots_lock would
         * lead to deadlock with the synchronize_srcu in
         * install_new_memslots.
         */
        struct mutex slots_arch_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

        /* Used to wait for completion of MMU notifiers.  */
        spinlock_t mn_invalidate_lock;
        unsigned long mn_active_invalidate_count;
        struct rcuwait mn_memslots_update_rcuwait;

        /*
         * created_vcpus is protected by kvm->lock, and is incremented
         * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
         * incremented after storing the kvm_vcpu pointer in vcpus,
         * and is accessed atomically.
         */
        atomic_t online_vcpus;
        int created_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t        lock;
                struct list_head  items;
                struct list_head  resampler_list;
                struct mutex      resampler_lock;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
        struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
        unsigned long mmu_notifier_range_start;
        unsigned long mmu_notifier_range_end;
#endif
        struct list_head devices;
        u64 manual_dirty_log_protect;
        struct dentry *debugfs_dentry;
        struct kvm_stat_data **debugfs_stat_data;
        struct srcu_struct srcu;
        struct srcu_struct irq_srcu;
        pid_t userspace_pid;
        unsigned int max_halt_poll_ns;
        u32 dirty_ring_size;
        bool vm_bugged;
        bool vm_dead;

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
        struct notifier_block pm_notifier;
#endif
        char stats_id[KVM_STATS_NAME_SIZE];
};

#define kvm_err(fmt, ...) \
        pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
        pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
        pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
        pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
                             ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm [%i]: " fmt, \
                           task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)                                     \
        kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt,                  \
                        (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...)                                      \
        kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...)                          \
        kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id,           \
                              ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...)                                        \
        kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline void kvm_vm_dead(struct kvm *kvm)
{
        kvm->vm_dead = true;
        kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
}

static inline void kvm_vm_bugged(struct kvm *kvm)
{
        kvm->vm_bugged = true;
        kvm_vm_dead(kvm);
}

#define KVM_BUG(cond, kvm, fmt...)                              \
({                                                              \
        int __ret = (cond);                                     \
                                                                \
        if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt))         \
                kvm_vm_bugged(kvm);                             \
        unlikely(__ret);                                        \
})

#define KVM_BUG_ON(cond, kvm)                                   \
({                                                              \
        int __ret = (cond);                                     \
                                                                \
        if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged))           \
                kvm_vm_bugged(kvm);                             \
        unlikely(__ret);                                        \
})

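/*
 * Illustrative sketch: KVM_BUG_ON() is for "can't happen" conditions that
 * should kill the VM rather than the host. Hypothetical use in arch code
 * ('some_invariant' is made up for illustration):
 *
 *	if (KVM_BUG_ON(!vcpu->arch.some_invariant, kvm))
 *		return -EIO;
 */
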
static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
        return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
        return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
                                      lockdep_is_held(&kvm->slots_lock) ||
                                      !refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        int num_vcpus = atomic_read(&kvm->online_vcpus);
        i = array_index_nospec(i, num_vcpus);

        /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)

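/*
 * Illustrative sketch: iterating over all online vCPUs, e.g. to kick every
 * vCPU except the caller's ('me' is hypothetical):
 *
 *	struct kvm_vcpu *v;
 *	int i;
 *
 *	kvm_for_each_vcpu(i, v, kvm) {
 *		if (v != me)
 *			kvm_vcpu_kick(v);
 *	}
 */
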
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
        struct kvm_vcpu *vcpu = NULL;
        int i;

        if (id < 0)
                return NULL;
        if (id < KVM_MAX_VCPUS)
                vcpu = kvm_get_vcpu(kvm, id);
        if (vcpu && vcpu->vcpu_id == id)
                return vcpu;
        kvm_for_each_vcpu(i, vcpu, kvm)
                if (vcpu->vcpu_id == id)
                        return vcpu;
        return NULL;
}

#define kvm_for_each_memslot(memslot, slots)                            \
        for (memslot = &slots->memslots[0];                             \
             memslot < slots->memslots + slots->used_slots; memslot++)  \
                if (WARN_ON_ONCE(!memslot->npages)) {                   \
                } else

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
        return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
                  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
bool kvm_get_kvm_safe(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
        as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
        return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
                        lockdep_is_held(&kvm->slots_lock) ||
                        !refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
        int as_id = kvm_arch_vcpu_memslots_id(vcpu);

        return __kvm_memslots(vcpu->kvm, as_id);
}

static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
        int index = slots->id_to_index[id];
        struct kvm_memory_slot *slot;

        if (index < 0)
                return NULL;

        slot = &slots->memslots[index];

        WARN_ON(slot->id != id);
        return slot;
}

/*
 * The KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
        KVM_MR_CREATE,
        KVM_MR_DELETE,
        KVM_MR_MOVE,
        KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
                          const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
                            const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                const struct kvm_userspace_memory_region *mem,
                                enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot *old,
                                const struct kvm_memory_slot *new,
                                enum kvm_mr_change change);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
                            struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
                                      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
                               bool atomic, bool *async, bool write_fault,
                               bool *writable, hva_t *hva);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);

void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                 void *data, unsigned int offset,
                                 unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                  void *data, unsigned int offset,
                                  unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len);

#define __kvm_get_guest(kvm, gfn, offset, v)                            \
({                                                                      \
        unsigned long __addr = gfn_to_hva(kvm, gfn);                    \
        typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
        int __ret = -EFAULT;                                            \
                                                                        \
        if (!kvm_is_error_hva(__addr))                                  \
                __ret = get_user(v, __uaddr);                           \
        __ret;                                                          \
})

#define kvm_get_guest(kvm, gpa, v)                                      \
({                                                                      \
        gpa_t __gpa = gpa;                                              \
        struct kvm *__kvm = kvm;                                        \
                                                                        \
        __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT,                     \
                        offset_in_page(__gpa), v);                      \
})

#define __kvm_put_guest(kvm, gfn, offset, v)                            \
({                                                                      \
        unsigned long __addr = gfn_to_hva(kvm, gfn);                    \
        typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
        int __ret = -EFAULT;                                            \
                                                                        \
        if (!kvm_is_error_hva(__addr))                                  \
                __ret = put_user(v, __uaddr);                           \
        if (!__ret)                                                     \
                mark_page_dirty(kvm, gfn);                              \
        __ret;                                                          \
})

#define kvm_put_guest(kvm, gpa, v)                                      \
({                                                                      \
        gpa_t __gpa = gpa;                                              \
        struct kvm *__kvm = kvm;                                        \
                                                                        \
        __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT,                     \
                        offset_in_page(__gpa), v);                      \
})

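/*
 * Illustrative sketch: kvm_get_guest()/kvm_put_guest() access one value
 * whose type is inferred from 'v'; the value must not cross a page
 * boundary. 'gpa' here is hypothetical:
 *
 *	u64 val = 0;
 *
 *	if (kvm_get_guest(kvm, gpa, val))
 *		return -EFAULT;
 *	if (kvm_put_guest(kvm, gpa, val + 1))
 *		return -EFAULT;
 */
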
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
                             int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
                               unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
                        unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
                              int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
                         unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif

void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
                                   unsigned long end);
void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
                                   unsigned long end);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                                        struct kvm_memory_slot *slot,
                                        gfn_t gfn_offset,
                                        unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
                                        const struct kvm_memory_slot *memslot);
#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
                      int *is_dirty, struct kvm_memory_slot **memslot);
#endif

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                        bool line_status);
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                            struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
#endif

#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
#endif

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void *opaque);
void kvm_arch_hardware_unsetup(void);
int kvm_arch_check_processor_compat(void *opaque);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
int kvm_arch_create_vm_debugfs(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
 * All architectures that want to use vzalloc currently also
 * need their own kvm_arch_alloc_vm implementation.
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}
#endif

static inline void __kvm_arch_free_vm(struct kvm *kvm)
{
        kvfree(kvm);
}

#ifndef __KVM_HAVE_ARCH_VM_FREE
static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        __kvm_arch_free_vm(kvm);
}
#endif

#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
        return -ENOTSUPP;
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
        return false;
}
#endif

#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
        return false;
}
#endif

static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
        return vcpu->arch.waitp;
#else
        return &vcpu->wait;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * Returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQs. On some architectures the virtual interrupt
 * controller is dynamically instantiated, so this is not always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
        return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
                    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
                               struct kvm *kvm, int irq_source_id,
                               int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);

/*
 * Returns a pointer to the memslot at slot_index if it contains gfn.
 * Otherwise returns NULL.
 */
static inline struct kvm_memory_slot *
try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        if (slot_index < 0 || slot_index >= slots->used_slots)
                return NULL;

        /*
         * slot_index can come from vcpu->last_used_slot which is not kept
         * in sync with userspace-controllable memslot deletion. So use nospec
         * to prevent the CPU from speculating past the end of memslots[].
         */
        slot_index = array_index_nospec(slot_index, slots->used_slots);
        slot = &slots->memslots[slot_index];

        if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
                return slot;
        else
                return NULL;
}

/*
 * Returns a pointer to the memslot that contains gfn and records the index
 * of the slot in @index. Otherwise returns NULL.
 *
 * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index)
{
        int start = 0, end = slots->used_slots;
        struct kvm_memory_slot *memslots = slots->memslots;
        struct kvm_memory_slot *slot;

        if (unlikely(!slots->used_slots))
                return NULL;

        while (start < end) {
                int slot = start + (end - start) / 2;

                if (gfn >= memslots[slot].base_gfn)
                        end = slot;
                else
                        start = slot + 1;
        }

        slot = try_get_memslot(slots, start, gfn);
        if (slot) {
                *index = start;
                return slot;
        }

        return NULL;
}

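/*
 * Worked example: with used_slots == 3 and base_gfn values 0x100, 0x10, 0x0
 * (descending, 0x10 pages each), a search for gfn 0x15 first probes index 1
 * (0x15 >= 0x10, so end = 1), then index 0 (0x15 < 0x100, so start = 1),
 * and terminates with start == 1; try_get_memslot() confirms 0x15 lies in
 * [0x10, 0x20) and returns slot 1.
 */
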
/*
 * __gfn_to_memslot() and its descendants are here because it is called from
 * non-modular code in arch/powerpc/kvm/book3s_64_vio{,_hv}.c. gfn_to_memslot()
 * itself isn't here as an inline because that would bloat other code too much.
 */
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
        struct kvm_memory_slot *slot;
        int slot_index = atomic_read(&slots->last_used_slot);

        slot = try_get_memslot(slots, slot_index, gfn);
        if (slot)
                return slot;

        slot = search_memslots(slots, gfn, &slot_index);
        if (slot) {
                atomic_set(&slots->last_used_slot, slot_index);
                return slot;
        }

        return NULL;
}

static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
        /*
         * The index was checked originally in search_memslots.  To avoid
         * that a malicious guest builds a Spectre gadget out of e.g. page
         * table walks, do not let the processor speculate loads outside
         * the guest's registered memslots.
         */
        unsigned long offset = gfn - slot->base_gfn;
        offset = array_index_nospec(offset, slot->npages);
        return slot->userspace_addr + offset * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
        gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

        return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}

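/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12), gfn_to_gpa(0x1234)
 * yields 0x1234000 and gpa_to_gfn(0x1234fff) yields 0x1234; the intra-page
 * offset is recovered separately, e.g. with offset_in_page().
 */
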
static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
                                                gpa_t gpa)
{
        return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
        unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

        return kvm_is_error_hva(hva);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stat_data {
        struct kvm *kvm;
        const struct _kvm_stats_desc *desc;
        enum kvm_stat_kind kind;
};

struct _kvm_stats_desc {
        struct kvm_stats_desc desc;
        char name[KVM_STATS_NAME_SIZE];
};

1362#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz)                      \
1363        .flags = type | unit | base |                                          \
1364                 BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) |              \
1365                 BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) |              \
1366                 BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK),               \
1367        .exponent = exp,                                                       \
1368        .size = sz,                                                            \
1369        .bucket_size = bsz
1370
1371#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz)            \
1372        {                                                                      \
1373                {                                                              \
1374                        STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
1375                        .offset = offsetof(struct kvm_vm_stat, generic.stat)   \
1376                },                                                             \
1377                .name = #stat,                                                 \
1378        }
1379#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz)          \
1380        {                                                                      \
1381                {                                                              \
1382                        STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
1383                        .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
1384                },                                                             \
1385                .name = #stat,                                                 \
1386        }
1387#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz)                    \
1388        {                                                                      \
1389                {                                                              \
1390                        STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
1391                        .offset = offsetof(struct kvm_vm_stat, stat)           \
1392                },                                                             \
1393                .name = #stat,                                                 \
1394        }
1395#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz)                  \
1396        {                                                                      \
1397                {                                                              \
1398                        STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
1399                        .offset = offsetof(struct kvm_vcpu_stat, stat)         \
1400                },                                                             \
1401                .name = #stat,                                                 \
1402        }
/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz)                \
        SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)

#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent)               \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE,                     \
                unit, base, exponent, 1, 0)
#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent)                  \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT,                        \
                unit, base, exponent, 1, 0)
#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent)                     \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK,                           \
                unit, base, exponent, 1, 0)
#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz)     \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST,                    \
                unit, base, exponent, sz, bsz)
#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz)             \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST,                       \
                unit, base, exponent, sz, 0)

/* Cumulative counter, read/write */
#define STATS_DESC_COUNTER(SCOPE, name)                                        \
        STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE,                \
                KVM_STATS_BASE_POW10, 0)
/* Instantaneous counter, read only */
#define STATS_DESC_ICOUNTER(SCOPE, name)                                       \
        STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE,                   \
                KVM_STATS_BASE_POW10, 0)
/* Peak counter, read/write */
#define STATS_DESC_PCOUNTER(SCOPE, name)                                       \
        STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE,                      \
                KVM_STATS_BASE_POW10, 0)

/* Cumulative time in nanoseconds */
#define STATS_DESC_TIME_NSEC(SCOPE, name)                                      \
        STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS,             \
                KVM_STATS_BASE_POW10, -9)
/* Linear histogram for time in nanoseconds */
#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz)                     \
        STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS,            \
                KVM_STATS_BASE_POW10, -9, sz, bsz)
/* Logarithmic histogram for time in nanoseconds */
#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz)                          \
        STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS,               \
                KVM_STATS_BASE_POW10, -9, sz)
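
/*
 * Illustrative sketch (not a definition in this header): assuming
 * STATS_DESC_COMMON above fills in .flags, .exponent, .size and
 * .bucket_size, STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush)
 * expands to roughly
 *
 *	{
 *		{
 *			.flags = KVM_STATS_TYPE_CUMULATIVE |
 *				 KVM_STATS_UNIT_NONE |
 *				 KVM_STATS_BASE_POW10,
 *			.exponent = 0,
 *			.size = 1,
 *			.bucket_size = 0,
 *			.offset = offsetof(struct kvm_vm_stat,
 *					   generic.remote_tlb_flush),
 *		},
 *		.name = "remote_tlb_flush",
 *	},
 *
 * i.e. a single u64 cumulative counter whose user-visible name is
 * derived from the struct member by the preprocessor.
 */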

#define KVM_GENERIC_VM_STATS()                                                 \
        STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush),                      \
        STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)

#define KVM_GENERIC_VCPU_STATS()                                               \
        STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll),                \
        STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll),                 \
        STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid),                   \
        STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup),                         \
        STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns),              \
        STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns),                 \
        STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns),                      \
        STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist,     \
                        HALT_POLL_HIST_COUNT),                                 \
        STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist,        \
                        HALT_POLL_HIST_COUNT),                                 \
        STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist,             \
                        HALT_POLL_HIST_COUNT)
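
/*
 * A minimal sketch of how an architecture consumes these macros when
 * building its exported descriptor array (the arch-specific counter
 * name below is hypothetical):
 *
 *	const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
 *		KVM_GENERIC_VM_STATS(),
 *		STATS_DESC_COUNTER(VM, arch_private_counter),
 *	};
 *
 * Every VM/VCPU-scoped entry must name a member of the arch's
 * struct kvm_vm_stat or struct kvm_vcpu_stat, otherwise the offsetof()
 * computed by the descriptor macros is meaningless.
 */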

extern struct dentry *kvm_debugfs_dir;

ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
                       const struct _kvm_stats_desc *desc,
                       void *stats, size_t size_stats,
                       char __user *user_buffer, size_t size, loff_t *offset);

/**
 * kvm_stats_linear_hist_update() - Update the bucket value for linear
 * histogram statistics data.
 *
 * @data: start address of the stats data
 * @size: the number of buckets in the stats data
 * @value: the new value used to update the linear histogram's bucket
 * @bucket_size: the size (width) of a bucket
 */
static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
                                                u64 value, size_t bucket_size)
{
        size_t index = div64_u64(value, bucket_size);

        index = min(index, size - 1);
        ++data[index];
}
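
/*
 * Worked example with illustrative numbers: for bucket_size = 1000 and
 * size = 8, a value of 2500 increments bucket 2500 / 1000 = 2, and any
 * value >= 7000 is clamped into the last bucket, index 7.
 */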

/**
 * kvm_stats_log_hist_update() - Update the bucket value for logarithmic
 * histogram statistics data.
 *
 * @data: start address of the stats data
 * @size: the number of buckets in the stats data
 * @value: the new value used to update the logarithmic histogram's bucket
 */
static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
{
        size_t index = fls64(value);

        index = min(index, size - 1);
        ++data[index];
}

#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize)                      \
        kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
#define KVM_STATS_LOG_HIST_UPDATE(array, value)                                \
        kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)
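
/*
 * For the logarithmic variant the bucket index is fls64(value): bucket 0
 * holds value 0 and bucket n (n >= 1) covers [2^(n-1), 2^n), with the
 * last bucket again absorbing the overflow.  A hypothetical caller that
 * tracks a wait time in an embedded array would do:
 *
 *	KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
 *				  wait_ns);
 *
 * The wrappers derive the bucket count from the array itself via
 * ARRAY_SIZE(), so the declaration and the update cannot drift apart.
 */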

extern const struct kvm_stats_header kvm_vm_stats_header;
extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
extern const struct kvm_stats_header kvm_vcpu_stats_header;
extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
        if (unlikely(kvm->mmu_notifier_count))
                return 1;
        /*
         * Ensure the read of mmu_notifier_count happens before the read
         * of mmu_notifier_seq.  This interacts with the smp_wmb() in
         * mmu_notifier_invalidate_range_end to make sure that the caller
         * either sees the old (non-zero) value of mmu_notifier_count or
         * the new (incremented) value of mmu_notifier_seq.
         *
         * PowerPC Book3s HV KVM calls this under a per-page lock rather
         * than under kvm->mmu_lock, for scalability, so it can't rely on
         * kvm->mmu_lock to keep things ordered.
         */
        smp_rmb();
        if (kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}

static inline int mmu_notifier_retry_hva(struct kvm *kvm,
                                         unsigned long mmu_seq,
                                         unsigned long hva)
{
        lockdep_assert_held(&kvm->mmu_lock);
        /*
         * If mmu_notifier_count is non-zero, then the range maintained by
         * kvm_mmu_notifier_invalidate_range_start contains all addresses that
         * might be being invalidated. Note that it may include some false
         * positives, due to shortcuts when handling concurrent invalidations.
         */
        if (unlikely(kvm->mmu_notifier_count) &&
            hva >= kvm->mmu_notifier_range_start &&
            hva < kvm->mmu_notifier_range_end)
                return 1;
        if (kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
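
/*
 * Sketch of the expected calling pattern, simplified from typical arch
 * page-fault code (locking details vary by architecture):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);		// may sleep
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry_hva(kvm, mmu_seq, hva))
 *		goto out_unlock;		// raced with an invalidation
 *	// ... install the translation ...
 *
 * The sequence snapshot must be taken before the pfn lookup, so that an
 * invalidation between lookup and install bumps mmu_notifier_seq and
 * forces the fault to be retried.
 */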
#endif

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
                          struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        /*
         * Ensure the rest of the request is published to kvm_check_request's
         * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
         */
        smp_wmb();
        set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
        return READ_ONCE(vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
        return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
        clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (kvm_test_request(req, vcpu)) {
                kvm_clear_request(req, vcpu);

                /*
                 * Ensure the rest of the request is visible to
                 * kvm_check_request's caller.  Paired with the smp_wmb
                 * in kvm_make_request.
                 */
                smp_mb__after_atomic();
                return true;
        } else {
                return false;
        }
}
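
/*
 * Typical request flow, as a sketch (the request and the handler below
 * are examples; architectures define their own requests):
 *
 *	// producer, any context:
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	// consumer, in the vcpu run loop:
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		flush_guest_tlb(vcpu);		// hypothetical handler
 *
 * The smp_wmb()/smp_mb__after_atomic() pairing above guarantees that
 * data written before kvm_make_request() is visible to the vcpu once
 * kvm_check_request() observes the request bit.
 */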

extern bool kvm_rebooting;

extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;

struct kvm_device {
        const struct kvm_device_ops *ops;
        struct kvm *kvm;
        void *private;
        struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
        const char *name;

        /*
         * create is called holding kvm->lock, and any operations not suitable
         * to do while holding the lock should be deferred to init (see
         * below).
         */
        int (*create)(struct kvm_device *dev, u32 type);

        /*
         * init is called after create, if create was successful, and is
         * called without kvm->lock held.
         */
        void (*init)(struct kvm_device *dev);

        /*
         * Destroy is responsible for freeing dev.
         *
         * Destroy may be called before or after destructors are called
         * on emulated I/O regions, depending on whether a reference is
         * held by a vcpu or other kvm component that gets destroyed
         * after the emulated I/O.
         */
        void (*destroy)(struct kvm_device *dev);

        /*
         * Release is an alternative method to free the device. It is
         * called when the device file descriptor is closed. Once
         * release is called, the destroy method will not be called
         * anymore as the device is removed from the device list of
         * the VM. kvm->lock is held.
         */
        void (*release)(struct kvm_device *dev);

        int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
                      unsigned long arg);
        int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
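
/*
 * Minimal sketch of a device implementation (all "foo" names are
 * hypothetical); per the comment above, only name, create and destroy
 * are mandatory, and destroy must free dev itself:
 *
 *	static int foo_create(struct kvm_device *dev, u32 type)
 *	{
 *		dev->private = kzalloc(sizeof(struct foo), GFP_KERNEL);
 *		return dev->private ? 0 : -ENOMEM;
 *	}
 *
 *	static void foo_destroy(struct kvm_device *dev)
 *	{
 *		kfree(dev->private);
 *		kfree(dev);
 *	}
 *
 *	static struct kvm_device_ops kvm_foo_ops = {
 *		.name = "kvm-foo",
 *		.create = foo_create,
 *		.destroy = foo_destroy,
 *	};
 *
 * registered once at init time via
 * kvm_register_device_ops(&kvm_foo_ops, <some KVM_DEV_TYPE_* value>).
 */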

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.in_spin_loop = val;
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
{
        return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
                !(memslot->flags & KVM_MEMSLOT_INVALID));
}

struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
                           struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
                           struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
                                  uint32_t guest_irq, bool set);
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
                                  struct kvm_kernel_irq_routing_entry *);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we woke up during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
        return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
        return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

#ifdef CONFIG_HAVE_KVM_NO_POLL
/* Callback that tells whether we must not poll */
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
        return false;
}
#endif /* CONFIG_HAVE_KVM_NO_POLL */

#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg);
#else
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
                                             unsigned int ioctl,
                                             unsigned long arg)
{
        return -ENOIOCTLCMD;
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */

void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                                            unsigned long start, unsigned long end);

#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
        return 0;
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */

typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
                                uintptr_t data, const char *name,
                                struct task_struct **thread_ptr);

#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
{
        vcpu->run->exit_reason = KVM_EXIT_INTR;
        vcpu->stat.signal_exits++;
}
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */

/*
 * This defines how many reserved entries we want to keep before we
 * kick the vcpu out to userspace to avoid the dirty ring becoming
 * full.  This value can be tuned higher if e.g. PML is enabled on
 * the host.
 */
#define KVM_DIRTY_RING_RSVD_ENTRIES 64

/* Max number of entries allowed for each kvm dirty ring */
#define KVM_DIRTY_RING_MAX_ENTRIES 65536

#endif /* __KVM_HOST_H */