linux/include/linux/kvm_host.h
   1/* SPDX-License-Identifier: GPL-2.0-only */
   2#ifndef __KVM_HOST_H
   3#define __KVM_HOST_H
   4
   5
   6#include <linux/types.h>
   7#include <linux/hardirq.h>
   8#include <linux/list.h>
   9#include <linux/mutex.h>
  10#include <linux/spinlock.h>
  11#include <linux/signal.h>
  12#include <linux/sched.h>
  13#include <linux/sched/stat.h>
  14#include <linux/bug.h>
  15#include <linux/minmax.h>
  16#include <linux/mm.h>
  17#include <linux/mmu_notifier.h>
  18#include <linux/preempt.h>
  19#include <linux/msi.h>
  20#include <linux/slab.h>
  21#include <linux/vmalloc.h>
  22#include <linux/rcupdate.h>
  23#include <linux/ratelimit.h>
  24#include <linux/err.h>
  25#include <linux/irqflags.h>
  26#include <linux/context_tracking.h>
  27#include <linux/irqbypass.h>
  28#include <linux/rcuwait.h>
  29#include <linux/refcount.h>
  30#include <linux/nospec.h>
  31#include <linux/notifier.h>
  32#include <linux/ftrace.h>
  33#include <linux/hashtable.h>
  34#include <linux/instrumentation.h>
  35#include <linux/interval_tree.h>
  36#include <linux/rbtree.h>
  37#include <linux/xarray.h>
  38#include <asm/signal.h>
  39
  40#include <linux/kvm.h>
  41#include <linux/kvm_para.h>
  42
  43#include <linux/kvm_types.h>
  44
  45#include <asm/kvm_host.h>
  46#include <linux/kvm_dirty_ring.h>
  47
  48#ifndef KVM_MAX_VCPU_IDS
  49#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
  50#endif
  51
  52/*
  53 * Bits 16 ~ 31 of kvm_memory_region::flags are used internally by KVM;
  54 * the other bits are visible to userspace and are defined in
  55 * include/uapi/linux/kvm.h.
  56 */
  57#define KVM_MEMSLOT_INVALID     (1UL << 16)
  58
  59/*
  60 * Bit 63 of the memslot generation number is an "update in-progress flag",
  61 * e.g. is temporarily set for the duration of install_new_memslots().
  62 * This flag effectively creates a unique generation number that is used to
  63 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
  64 * i.e. may (or may not) have come from the previous memslots generation.
  65 *
  66 * This is necessary because the actual memslots update is not atomic with
  67 * respect to the generation number update.  Updating the generation number
  68 * first would allow a vCPU to cache a spte from the old memslots using the
  69 * new generation number, and updating the generation number after switching
  70 * to the new memslots would allow cache hits using the old generation number
  71 * to reference the defunct memslots.
  72 *
  73 * This mechanism is used to prevent getting hits in KVM's caches while a
  74 * memslot update is in-progress, and to prevent cache hits *after* updating
  75 * the actual generation number against accesses that were inserted into the
  76 * cache *before* the memslots were updated.
  77 */
  78#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS      BIT_ULL(63)
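
/*
 * Illustrative sketch (not part of the KVM API): the consumer pattern the
 * comment above describes.  A cache records the generation it observed when
 * it was filled; because a generation captured during an update has bit 63
 * set, it can never match the final generation, so entries inserted while an
 * update was in flight are naturally treated as misses.  The helper name is
 * hypothetical.
 */
static inline bool example_cached_gen_is_stale(u64 cached_gen, u64 current_gen)
{
        return cached_gen != current_gen;
}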
  79
  80/* Two fragments for cross MMIO pages. */
  81#define KVM_MAX_MMIO_FRAGMENTS  2
  82
  83#ifndef KVM_ADDRESS_SPACE_NUM
  84#define KVM_ADDRESS_SPACE_NUM   1
  85#endif
  86
  87/*
  88 * For a normal pfn, the highest 12 bits should be zero,
  89 * so we can mask bits 52 ~ 62 to indicate an error pfn and
  90 * mask bit 63 to indicate a noslot pfn.
  91 */
  92#define KVM_PFN_ERR_MASK        (0x7ffULL << 52)
  93#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
  94#define KVM_PFN_NOSLOT          (0x1ULL << 63)
  95
  96#define KVM_PFN_ERR_FAULT       (KVM_PFN_ERR_MASK)
  97#define KVM_PFN_ERR_HWPOISON    (KVM_PFN_ERR_MASK + 1)
  98#define KVM_PFN_ERR_RO_FAULT    (KVM_PFN_ERR_MASK + 2)
  99
 100/*
 101 * Error pfns indicate that the gfn is in a slot but failed to
 102 * be translated to a pfn on the host.
 103 */
 104static inline bool is_error_pfn(kvm_pfn_t pfn)
 105{
 106        return !!(pfn & KVM_PFN_ERR_MASK);
 107}
 108
 109/*
 110 * Error_noslot pfns indicate that the gfn cannot be
 111 * translated to a pfn - either it is not in any slot or
 112 * the translation to a pfn failed.
 113 */
 114static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
 115{
 116        return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
 117}
 118
 119/* A noslot pfn indicates that the gfn is not in any slot. */
 120static inline bool is_noslot_pfn(kvm_pfn_t pfn)
 121{
 122        return pfn == KVM_PFN_NOSLOT;
 123}
 124
 125/*
 126 * Architectures whose KVM_HVA_ERR_BAD is not PAGE_OFFSET (e.g. s390)
 127 * provide their own defines and kvm_is_error_hva().
 128 */
 129#ifndef KVM_HVA_ERR_BAD
 130
 131#define KVM_HVA_ERR_BAD         (PAGE_OFFSET)
 132#define KVM_HVA_ERR_RO_BAD      (PAGE_OFFSET + PAGE_SIZE)
 133
 134static inline bool kvm_is_error_hva(unsigned long addr)
 135{
 136        return addr >= PAGE_OFFSET;
 137}
 138
 139#endif
 140
 141#define KVM_ERR_PTR_BAD_PAGE    (ERR_PTR(-ENOENT))
 142
 143static inline bool is_error_page(struct page *page)
 144{
 145        return IS_ERR(page);
 146}
 147
 148#define KVM_REQUEST_MASK           GENMASK(7,0)
 149#define KVM_REQUEST_NO_WAKEUP      BIT(8)
 150#define KVM_REQUEST_WAIT           BIT(9)
 151/*
 152 * Architecture-independent vcpu->requests bit members
 153 * Bits 6-7 are reserved for more arch-independent bits.
 154 */
 155#define KVM_REQ_TLB_FLUSH         (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 156#define KVM_REQ_MMU_RELOAD        (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 157#define KVM_REQ_UNBLOCK           2
 158#define KVM_REQ_UNHALT            3
 159#define KVM_REQ_VM_DEAD           (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 160#define KVM_REQ_GPC_INVALIDATE    (5 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 161#define KVM_REQUEST_ARCH_BASE     8
 162
 163#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
 164        BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
 165        (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
 166})
 167#define KVM_ARCH_REQ(nr)           KVM_ARCH_REQ_FLAGS(nr, 0)
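
/*
 * Illustrative sketch, guarded out with #if 0: how an architecture would
 * typically layer its own request bits on top of KVM_REQUEST_ARCH_BASE.
 * The request names are hypothetical and not taken from any real
 * architecture.
 */
#if 0
#define KVM_REQ_EXAMPLE_FLUSH \
        KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_EXAMPLE_EVENT   KVM_ARCH_REQ(1)
#endif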
 168
 169bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 170                                 unsigned long *vcpu_bitmap);
 171bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
 172bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
 173                                      struct kvm_vcpu *except);
 174bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
 175                                unsigned long *vcpu_bitmap);
 176
 177#define KVM_USERSPACE_IRQ_SOURCE_ID             0
 178#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID        1
 179
 180extern struct mutex kvm_lock;
 181extern struct list_head vm_list;
 182
 183struct kvm_io_range {
 184        gpa_t addr;
 185        int len;
 186        struct kvm_io_device *dev;
 187};
 188
 189#define NR_IOBUS_DEVS 1000
 190
 191struct kvm_io_bus {
 192        int dev_count;
 193        int ioeventfd_count;
 194        struct kvm_io_range range[];
 195};
 196
 197enum kvm_bus {
 198        KVM_MMIO_BUS,
 199        KVM_PIO_BUS,
 200        KVM_VIRTIO_CCW_NOTIFY_BUS,
 201        KVM_FAST_MMIO_BUS,
 202        KVM_NR_BUSES
 203};
 204
 205int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
 206                     int len, const void *val);
 207int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
 208                            gpa_t addr, int len, const void *val, long cookie);
 209int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
 210                    int len, void *val);
 211int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 212                            int len, struct kvm_io_device *dev);
 213int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 214                              struct kvm_io_device *dev);
 215struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 216                                         gpa_t addr);
 217
 218#ifdef CONFIG_KVM_ASYNC_PF
 219struct kvm_async_pf {
 220        struct work_struct work;
 221        struct list_head link;
 222        struct list_head queue;
 223        struct kvm_vcpu *vcpu;
 224        struct mm_struct *mm;
 225        gpa_t cr2_or_gpa;
 226        unsigned long addr;
 227        struct kvm_arch_async_pf arch;
 228        bool   wakeup_all;
 229        bool notpresent_injected;
 230};
 231
 232void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
 233void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
 234bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 235                        unsigned long hva, struct kvm_arch_async_pf *arch);
 236int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 237#endif
 238
 239#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
 240struct kvm_gfn_range {
 241        struct kvm_memory_slot *slot;
 242        gfn_t start;
 243        gfn_t end;
 244        pte_t pte;
 245        bool may_block;
 246};
 247bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
 248bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
 249bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
 250bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
 251#endif
 252
 253enum {
 254        OUTSIDE_GUEST_MODE,
 255        IN_GUEST_MODE,
 256        EXITING_GUEST_MODE,
 257        READING_SHADOW_PAGE_TABLES,
 258};
 259
 260#define KVM_UNMAPPED_PAGE       ((void *) 0x500 + POISON_POINTER_DELTA)
 261
 262struct kvm_host_map {
 263        /*
 264         * Only valid if the 'pfn' is managed by the host kernel (i.e. there is
 265         * a 'struct page' for it; when using the mem= kernel parameter some
 266         * memory can be used as guest memory without being managed by the
 267         * host kernel).
 268         * If 'pfn' is not managed by the host kernel, this field is
 269         * initialized to KVM_UNMAPPED_PAGE.
 270         */
 271        struct page *page;
 272        void *hva;
 273        kvm_pfn_t pfn;
 274        kvm_pfn_t gfn;
 275};
 276
 277/*
 278 * Used to check whether the mapping is valid. Never inspect the fields of
 279 * 'kvm_host_map' directly for that.
 280 */
 281static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
 282{
 283        return !!map->hva;
 284}
 285
 286static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
 287{
 288        return single_task_running() && !need_resched() && ktime_before(cur, stop);
 289}
 290
 291/*
 292 * Sometimes a large or cross-page mmio needs to be broken up into separate
 293 * exits for userspace servicing.
 294 */
 295struct kvm_mmio_fragment {
 296        gpa_t gpa;
 297        void *data;
 298        unsigned len;
 299};
 300
 301struct kvm_vcpu {
 302        struct kvm *kvm;
 303#ifdef CONFIG_PREEMPT_NOTIFIERS
 304        struct preempt_notifier preempt_notifier;
 305#endif
 306        int cpu;
 307        int vcpu_id; /* id given by userspace at creation */
 308        int vcpu_idx; /* index in kvm->vcpus array */
 309        int srcu_idx;
 310        int mode;
 311        u64 requests;
 312        unsigned long guest_debug;
 313
 314        struct mutex mutex;
 315        struct kvm_run *run;
 316
 317#ifndef __KVM_HAVE_ARCH_WQP
 318        struct rcuwait wait;
 319#endif
 320        struct pid __rcu *pid;
 321        int sigset_active;
 322        sigset_t sigset;
 323        unsigned int halt_poll_ns;
 324        bool valid_wakeup;
 325
 326#ifdef CONFIG_HAS_IOMEM
 327        int mmio_needed;
 328        int mmio_read_completed;
 329        int mmio_is_write;
 330        int mmio_cur_fragment;
 331        int mmio_nr_fragments;
 332        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
 333#endif
 334
 335#ifdef CONFIG_KVM_ASYNC_PF
 336        struct {
 337                u32 queued;
 338                struct list_head queue;
 339                struct list_head done;
 340                spinlock_t lock;
 341        } async_pf;
 342#endif
 343
 344#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
 345        /*
 346         * CPU relax intercept or pause loop exit optimization
 347         * in_spin_loop: set when a vcpu does a pause loop exit
 348         *  or has its cpu relax intercepted.
 349         * dy_eligible: indicates whether vcpu is eligible for directed yield.
 350         */
 351        struct {
 352                bool in_spin_loop;
 353                bool dy_eligible;
 354        } spin_loop;
 355#endif
 356        bool preempted;
 357        bool ready;
 358        struct kvm_vcpu_arch arch;
 359        struct kvm_vcpu_stat stat;
 360        char stats_id[KVM_STATS_NAME_SIZE];
 361        struct kvm_dirty_ring dirty_ring;
 362
 363        /*
 364         * The most recently used memslot by this vCPU and the slots generation
 365         * for which it is valid.
 366         * No wraparound protection is needed since generations won't overflow in
 367         * thousands of years, even assuming 1M memslot operations per second.
 368         */
 369        struct kvm_memory_slot *last_used_slot;
 370        u64 last_used_slot_gen;
 371};
 372
 373/*
 374 * Start accounting time towards a guest.
 375 * Must be called before entering guest context.
 376 */
 377static __always_inline void guest_timing_enter_irqoff(void)
 378{
 379        /*
 380         * This is running in ioctl context so it's safe to assume that it's the
 381         * stime pending cputime to flush.
 382         */
 383        instrumentation_begin();
 384        vtime_account_guest_enter();
 385        instrumentation_end();
 386}
 387
 388/*
 389 * Enter guest context and enter an RCU extended quiescent state.
 390 *
 391 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
 392 * unsafe to use any code which may directly or indirectly use RCU, tracing
 393 * (including IRQ flag tracing), or lockdep. All code in this period must be
 394 * non-instrumentable.
 395 */
 396static __always_inline void guest_context_enter_irqoff(void)
 397{
 398        /*
 399         * KVM does not hold any references to RCU-protected data when it
 400         * switches the CPU into guest mode. In fact, switching to guest mode
 401         * is very similar to exiting to userspace from RCU's point of view. In
 402         * addition, the CPU may stay in guest mode for quite a long time (up to
 403         * one time slice). Let's treat guest mode as a quiescent state, just
 404         * like we do with user-mode execution.
 405         */
 406        if (!context_tracking_guest_enter()) {
 407                instrumentation_begin();
 408                rcu_virt_note_context_switch(smp_processor_id());
 409                instrumentation_end();
 410        }
 411}
 412
 413/*
 414 * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
 415 * guest_state_enter_irqoff().
 416 */
 417static __always_inline void guest_enter_irqoff(void)
 418{
 419        guest_timing_enter_irqoff();
 420        guest_context_enter_irqoff();
 421}
 422
 423/**
 424 * guest_state_enter_irqoff - Fixup state when entering a guest
 425 *
 426 * Entry to a guest will enable interrupts, but from the kernel's point of
 427 * view interrupts are disabled when this is invoked. Also tell RCU about it.
 428 *
 429 * 1) Trace interrupts on state
 430 * 2) Invoke context tracking if enabled to adjust RCU state
 431 * 3) Tell lockdep that interrupts are enabled
 432 *
 433 * Invoked from architecture specific code before entering a guest.
 434 * Must be called with interrupts disabled and the caller must be
 435 * non-instrumentable.
 436 * The caller has to invoke guest_timing_enter_irqoff() before this.
 437 *
 438 * Note: this is analogous to exit_to_user_mode().
 439 */
 440static __always_inline void guest_state_enter_irqoff(void)
 441{
 442        instrumentation_begin();
 443        trace_hardirqs_on_prepare();
 444        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
 445        instrumentation_end();
 446
 447        guest_context_enter_irqoff();
 448        lockdep_hardirqs_on(CALLER_ADDR0);
 449}
 450
 451/*
 452 * Exit guest context and exit an RCU extended quiescent state.
 453 *
 454 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
 455 * unsafe to use any code which may directly or indirectly use RCU, tracing
 456 * (including IRQ flag tracing), or lockdep. All code in this period must be
 457 * non-instrumentable.
 458 */
 459static __always_inline void guest_context_exit_irqoff(void)
 460{
 461        context_tracking_guest_exit();
 462}
 463
 464/*
 465 * Stop accounting time towards a guest.
 466 * Must be called after exiting guest context.
 467 */
 468static __always_inline void guest_timing_exit_irqoff(void)
 469{
 470        instrumentation_begin();
 471        /* Flush the guest cputime we spent on the guest */
 472        vtime_account_guest_exit();
 473        instrumentation_end();
 474}
 475
 476/*
 477 * Deprecated. Architectures should move to guest_state_exit_irqoff() and
 478 * guest_timing_exit_irqoff().
 479 */
 480static __always_inline void guest_exit_irqoff(void)
 481{
 482        guest_context_exit_irqoff();
 483        guest_timing_exit_irqoff();
 484}
 485
 486static inline void guest_exit(void)
 487{
 488        unsigned long flags;
 489
 490        local_irq_save(flags);
 491        guest_exit_irqoff();
 492        local_irq_restore(flags);
 493}
 494
 495/**
 496 * guest_state_exit_irqoff - Establish state when returning from guest mode
 497 *
 498 * Entry from a guest disables interrupts, but guest mode is traced as
 499 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
 500 *
 501 * 1) Tell lockdep that interrupts are disabled
 502 * 2) Invoke context tracking if enabled to reactivate RCU
 503 * 3) Trace interrupts off state
 504 *
 505 * Invoked from architecture specific code after exiting a guest.
 506 * Must be invoked with interrupts disabled and the caller must be
 507 * non-instrumentable.
 508 * The caller has to invoke guest_timing_exit_irqoff() after this.
 509 *
 510 * Note: this is analogous to enter_from_user_mode().
 511 */
 512static __always_inline void guest_state_exit_irqoff(void)
 513{
 514        lockdep_hardirqs_off(CALLER_ADDR0);
 515        guest_context_exit_irqoff();
 516
 517        instrumentation_begin();
 518        trace_hardirqs_off_finish();
 519        instrumentation_end();
 520}
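
/*
 * Illustrative sketch of the call ordering prescribed by the kernel-doc
 * above for architecture code: timing accounting brackets the state
 * transitions, and the actual world switch (represented here by a
 * hypothetical example_arch_run_guest()) happens in between.
 */
static __always_inline void example_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
        guest_timing_enter_irqoff();            /* start guest time accounting */
        guest_state_enter_irqoff();             /* RCU/lockdep/tracing fixups */

        /* example_arch_run_guest(vcpu); -- hypothetical world switch */

        guest_state_exit_irqoff();              /* undo the entry fixups */
        guest_timing_exit_irqoff();             /* flush guest cputime */
}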
 521
 522static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
 523{
 524        /*
 525         * The memory barrier ensures a previous write to vcpu->requests cannot
 526         * be reordered with the read of vcpu->mode.  It pairs with the general
 527         * memory barrier following the write of vcpu->mode in VCPU RUN.
 528         */
 529        smp_mb__before_atomic();
 530        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
 531}
 532
 533/*
 534 * Some of the bitops functions do not support overly long bitmaps.
 535 * This number must be chosen so that it does not exceed those limits.
 536 */
 537#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 538
 539/*
 540 * Since at idle each memslot belongs to two memslot sets it has to contain
 541 * two embedded nodes for each data structure that it forms a part of.
 542 *
 543 * Two memslot sets (one active and one inactive) are necessary so the VM
 544 * continues to run on one memslot set while the other is being modified.
 545 *
 546 * These two memslot sets normally point to the same set of memslots.
 547 * They can, however, be desynchronized when performing a memslot management
 548 * operation by replacing the memslot to be modified by its copy.
 549 * After the operation is complete, both memslot sets once again point to
 550 * the same, common set of memslot data.
 551 *
 552 * The memslots themselves are independent of each other so they can be
 553 * individually added or deleted.
 554 */
 555struct kvm_memory_slot {
 556        struct hlist_node id_node[2];
 557        struct interval_tree_node hva_node[2];
 558        struct rb_node gfn_node[2];
 559        gfn_t base_gfn;
 560        unsigned long npages;
 561        unsigned long *dirty_bitmap;
 562        struct kvm_arch_memory_slot arch;
 563        unsigned long userspace_addr;
 564        u32 flags;
 565        short id;
 566        u16 as_id;
 567};
 568
 569static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
 570{
 571        return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
 572}
 573
 574static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
 575{
 576        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
 577}
 578
 579static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
 580{
 581        unsigned long len = kvm_dirty_bitmap_bytes(memslot);
 582
 583        return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
 584}
 585
 586#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
 587#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
 588#endif
 589
 590struct kvm_s390_adapter_int {
 591        u64 ind_addr;
 592        u64 summary_addr;
 593        u64 ind_offset;
 594        u32 summary_offset;
 595        u32 adapter_id;
 596};
 597
 598struct kvm_hv_sint {
 599        u32 vcpu;
 600        u32 sint;
 601};
 602
 603struct kvm_xen_evtchn {
 604        u32 port;
 605        u32 vcpu;
 606        u32 priority;
 607};
 608
 609struct kvm_kernel_irq_routing_entry {
 610        u32 gsi;
 611        u32 type;
 612        int (*set)(struct kvm_kernel_irq_routing_entry *e,
 613                   struct kvm *kvm, int irq_source_id, int level,
 614                   bool line_status);
 615        union {
 616                struct {
 617                        unsigned irqchip;
 618                        unsigned pin;
 619                } irqchip;
 620                struct {
 621                        u32 address_lo;
 622                        u32 address_hi;
 623                        u32 data;
 624                        u32 flags;
 625                        u32 devid;
 626                } msi;
 627                struct kvm_s390_adapter_int adapter;
 628                struct kvm_hv_sint hv_sint;
 629                struct kvm_xen_evtchn xen_evtchn;
 630        };
 631        struct hlist_node link;
 632};
 633
 634#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
 635struct kvm_irq_routing_table {
 636        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
 637        u32 nr_rt_entries;
 638        /*
 639         * Array indexed by gsi. Each entry contains a list of the irq chips
 640         * the gsi is connected to.
 641         */
 642        struct hlist_head map[];
 643};
 644#endif
 645
 646#ifndef KVM_PRIVATE_MEM_SLOTS
 647#define KVM_PRIVATE_MEM_SLOTS 0
 648#endif
 649
 650#define KVM_MEM_SLOTS_NUM SHRT_MAX
 651#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS)
 652
 653#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
 654static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
 655{
 656        return 0;
 657}
 658#endif
 659
 660struct kvm_memslots {
 661        u64 generation;
 662        atomic_long_t last_used_slot;
 663        struct rb_root_cached hva_tree;
 664        struct rb_root gfn_tree;
 665        /*
 666         * The mapping table from slot id to memslot.
 667         *
 668         * 7-bit bucket count matches the size of the old id to index array for
 669         * 512 slots, while giving good performance with this slot count.
 670         * Higher bucket counts bring only small performance improvements but
 671         * always result in higher memory usage (even for lower memslot counts).
 672         */
 673        DECLARE_HASHTABLE(id_hash, 7);
 674        int node_idx;
 675};
 676
 677struct kvm {
 678#ifdef KVM_HAVE_MMU_RWLOCK
 679        rwlock_t mmu_lock;
 680#else
 681        spinlock_t mmu_lock;
 682#endif /* KVM_HAVE_MMU_RWLOCK */
 683
 684        struct mutex slots_lock;
 685
 686        /*
 687         * Protects the arch-specific fields of struct kvm_memory_slots in
 688         * use by the VM. To be used under the slots_lock (above) or in a
 689         * kvm->srcu critical section where acquiring the slots_lock would
 690         * lead to deadlock with the synchronize_srcu in
 691         * install_new_memslots.
 692         */
 693        struct mutex slots_arch_lock;
 694        struct mm_struct *mm; /* userspace tied to this vm */
 695        unsigned long nr_memslot_pages;
 696        /* The two memslot sets - active and inactive (per address space) */
 697        struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2];
 698        /* The current active memslot set for each address space */
 699        struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
 700        struct xarray vcpu_array;
 701
 702        /* Used to wait for completion of MMU notifiers.  */
 703        spinlock_t mn_invalidate_lock;
 704        unsigned long mn_active_invalidate_count;
 705        struct rcuwait mn_memslots_update_rcuwait;
 706
 707        /* For management / invalidation of gfn_to_pfn_caches */
 708        spinlock_t gpc_lock;
 709        struct list_head gpc_list;
 710
 711        /*
 712         * created_vcpus is protected by kvm->lock, and is incremented
 713         * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
 714         * incremented after storing the kvm_vcpu pointer in vcpus,
 715         * and is accessed atomically.
 716         */
 717        atomic_t online_vcpus;
 718        int created_vcpus;
 719        int last_boosted_vcpu;
 720        struct list_head vm_list;
 721        struct mutex lock;
 722        struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
 723#ifdef CONFIG_HAVE_KVM_EVENTFD
 724        struct {
 725                spinlock_t        lock;
 726                struct list_head  items;
 727                struct list_head  resampler_list;
 728                struct mutex      resampler_lock;
 729        } irqfds;
 730        struct list_head ioeventfds;
 731#endif
 732        struct kvm_vm_stat stat;
 733        struct kvm_arch arch;
 734        refcount_t users_count;
 735#ifdef CONFIG_KVM_MMIO
 736        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
 737        spinlock_t ring_lock;
 738        struct list_head coalesced_zones;
 739#endif
 740
 741        struct mutex irq_lock;
 742#ifdef CONFIG_HAVE_KVM_IRQCHIP
 743        /*
 744         * Update side is protected by irq_lock.
 745         */
 746        struct kvm_irq_routing_table __rcu *irq_routing;
 747#endif
 748#ifdef CONFIG_HAVE_KVM_IRQFD
 749        struct hlist_head irq_ack_notifier_list;
 750#endif
 751
 752#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 753        struct mmu_notifier mmu_notifier;
 754        unsigned long mmu_notifier_seq;
 755        long mmu_notifier_count;
 756        unsigned long mmu_notifier_range_start;
 757        unsigned long mmu_notifier_range_end;
 758#endif
 759        struct list_head devices;
 760        u64 manual_dirty_log_protect;
 761        struct dentry *debugfs_dentry;
 762        struct kvm_stat_data **debugfs_stat_data;
 763        struct srcu_struct srcu;
 764        struct srcu_struct irq_srcu;
 765        pid_t userspace_pid;
 766        unsigned int max_halt_poll_ns;
 767        u32 dirty_ring_size;
 768        bool vm_bugged;
 769        bool vm_dead;
 770
 771#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
 772        struct notifier_block pm_notifier;
 773#endif
 774        char stats_id[KVM_STATS_NAME_SIZE];
 775};
 776
 777#define kvm_err(fmt, ...) \
 778        pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
 779#define kvm_info(fmt, ...) \
 780        pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
 781#define kvm_debug(fmt, ...) \
 782        pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
 783#define kvm_debug_ratelimited(fmt, ...) \
 784        pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
 785                             ## __VA_ARGS__)
 786#define kvm_pr_unimpl(fmt, ...) \
 787        pr_err_ratelimited("kvm [%i]: " fmt, \
 788                           task_tgid_nr(current), ## __VA_ARGS__)
 789
 790/* The guest did something we don't support. */
 791#define vcpu_unimpl(vcpu, fmt, ...)                                     \
 792        kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt,                  \
 793                        (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)
 794
 795#define vcpu_debug(vcpu, fmt, ...)                                      \
 796        kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
 797#define vcpu_debug_ratelimited(vcpu, fmt, ...)                          \
 798        kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id,           \
 799                              ## __VA_ARGS__)
 800#define vcpu_err(vcpu, fmt, ...)                                        \
 801        kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
 802
 803static inline void kvm_vm_dead(struct kvm *kvm)
 804{
 805        kvm->vm_dead = true;
 806        kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
 807}
 808
 809static inline void kvm_vm_bugged(struct kvm *kvm)
 810{
 811        kvm->vm_bugged = true;
 812        kvm_vm_dead(kvm);
 813}
 814
 815
 816#define KVM_BUG(cond, kvm, fmt...)                              \
 817({                                                              \
 818        int __ret = (cond);                                     \
 819                                                                \
 820        if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt))         \
 821                kvm_vm_bugged(kvm);                             \
 822        unlikely(__ret);                                        \
 823})
 824
 825#define KVM_BUG_ON(cond, kvm)                                   \
 826({                                                              \
 827        int __ret = (cond);                                     \
 828                                                                \
 829        if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged))           \
 830                kvm_vm_bugged(kvm);                             \
 831        unlikely(__ret);                                        \
 832})
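
/*
 * Illustrative usage sketch: KVM_BUG_ON() both warns and marks the VM as
 * bugged (and hence dead), so callers typically bail out when it fires.
 * The invariant checked here is made up for the example.
 */
static inline int example_check_invariant(struct kvm *kvm, long nr_pages)
{
        if (KVM_BUG_ON(nr_pages < 0, kvm))
                return -EIO;

        return 0;
}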
 833
 834static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
 835{
 836        return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
 837}
 838
 839static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
 840{
 841        return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
 842                                      lockdep_is_held(&kvm->slots_lock) ||
 843                                      !refcount_read(&kvm->users_count));
 844}
 845
 846static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 847{
 848        int num_vcpus = atomic_read(&kvm->online_vcpus);
 849        i = array_index_nospec(i, num_vcpus);
 850
 851        /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
 852        smp_rmb();
 853        return xa_load(&kvm->vcpu_array, i);
 854}
 855
 856#define kvm_for_each_vcpu(idx, vcpup, kvm)                 \
 857        xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
 858                          (atomic_read(&kvm->online_vcpus) - 1))
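
/*
 * Illustrative sketch: walking every online vCPU with kvm_for_each_vcpu().
 * The index must be an unsigned long because the iterator is backed by an
 * xarray walk.  The helper itself is hypothetical.
 */
static inline int example_count_preempted_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        unsigned long i;
        int n = 0;

        kvm_for_each_vcpu(i, vcpu, kvm)
                if (vcpu->preempted)
                        n++;

        return n;
}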
 859
 860static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
 861{
 862        struct kvm_vcpu *vcpu = NULL;
 863        unsigned long i;
 864
 865        if (id < 0)
 866                return NULL;
 867        if (id < KVM_MAX_VCPUS)
 868                vcpu = kvm_get_vcpu(kvm, id);
 869        if (vcpu && vcpu->vcpu_id == id)
 870                return vcpu;
 871        kvm_for_each_vcpu(i, vcpu, kvm)
 872                if (vcpu->vcpu_id == id)
 873                        return vcpu;
 874        return NULL;
 875}
 876
 877static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
 878{
 879        return vcpu->vcpu_idx;
 880}
 881
 882void kvm_destroy_vcpus(struct kvm *kvm);
 883
 884void vcpu_load(struct kvm_vcpu *vcpu);
 885void vcpu_put(struct kvm_vcpu *vcpu);
 886
 887#ifdef __KVM_HAVE_IOAPIC
 888void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
 889void kvm_arch_post_irq_routing_update(struct kvm *kvm);
 890#else
 891static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
 892{
 893}
 894static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
 895{
 896}
 897#endif
 898
 899#ifdef CONFIG_HAVE_KVM_IRQFD
 900int kvm_irqfd_init(void);
 901void kvm_irqfd_exit(void);
 902#else
 903static inline int kvm_irqfd_init(void)
 904{
 905        return 0;
 906}
 907
 908static inline void kvm_irqfd_exit(void)
 909{
 910}
 911#endif
 912int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 913                  struct module *module);
 914void kvm_exit(void);
 915
 916void kvm_get_kvm(struct kvm *kvm);
 917bool kvm_get_kvm_safe(struct kvm *kvm);
 918void kvm_put_kvm(struct kvm *kvm);
 919bool file_is_kvm(struct file *file);
 920void kvm_put_kvm_no_destroy(struct kvm *kvm);
 921
 922static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
 923{
 924        as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
 925        return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
 926                        lockdep_is_held(&kvm->slots_lock) ||
 927                        !refcount_read(&kvm->users_count));
 928}
 929
 930static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
 931{
 932        return __kvm_memslots(kvm, 0);
 933}
 934
 935static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
 936{
 937        int as_id = kvm_arch_vcpu_memslots_id(vcpu);
 938
 939        return __kvm_memslots(vcpu->kvm, as_id);
 940}
 941
 942static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
 943{
 944        return RB_EMPTY_ROOT(&slots->gfn_tree);
 945}
 946
 947#define kvm_for_each_memslot(memslot, bkt, slots)                             \
 948        hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
 949                if (WARN_ON_ONCE(!memslot->npages)) {                         \
 950                } else
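
/*
 * Illustrative sketch: summing guest pages across all memslots with
 * kvm_for_each_memslot().  'bkt' is the hash-bucket cursor required by the
 * underlying hash_for_each() walk; the helper itself is hypothetical.
 */
static inline unsigned long example_total_memslot_pages(struct kvm_memslots *slots)
{
        struct kvm_memory_slot *memslot;
        unsigned long total = 0;
        int bkt;

        kvm_for_each_memslot(memslot, bkt, slots)
                total += memslot->npages;

        return total;
}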
 951
 952static inline
 953struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
 954{
 955        struct kvm_memory_slot *slot;
 956        int idx = slots->node_idx;
 957
 958        hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
 959                if (slot->id == id)
 960                        return slot;
 961        }
 962
 963        return NULL;
 964}
 965
 966/* Iterator used for walking memslots that overlap a gfn range. */
 967struct kvm_memslot_iter {
 968        struct kvm_memslots *slots;
 969        struct rb_node *node;
 970        struct kvm_memory_slot *slot;
 971};
 972
 973static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
 974{
 975        iter->node = rb_next(iter->node);
 976        if (!iter->node)
 977                return;
 978
 979        iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
 980}
 981
 982static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
 983                                          struct kvm_memslots *slots,
 984                                          gfn_t start)
 985{
 986        int idx = slots->node_idx;
 987        struct rb_node *tmp;
 988        struct kvm_memory_slot *slot;
 989
 990        iter->slots = slots;
 991
 992        /*
 993         * Find the so-called "upper bound" of a key - the first node that has
 994         * its key strictly greater than the searched one (the start gfn in our case).
 995         */
 996        iter->node = NULL;
 997        for (tmp = slots->gfn_tree.rb_node; tmp; ) {
 998                slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
 999                if (start < slot->base_gfn) {
1000                        iter->node = tmp;
1001                        tmp = tmp->rb_left;
1002                } else {
1003                        tmp = tmp->rb_right;
1004                }
1005        }
1006
1007        /*
1008         * Find the slot with the lowest gfn that can possibly intersect with
1009         * the range, so we'll ideally have slot start <= range start
1010         */
1011        if (iter->node) {
1012                /*
1013                 * A NULL previous node means that the very first slot
1014                 * already has a higher start gfn.
1015                 * In this case slot start > range start.
1016                 */
1017                tmp = rb_prev(iter->node);
1018                if (tmp)
1019                        iter->node = tmp;
1020        } else {
1021                /* a NULL node below means no slots */
1022                iter->node = rb_last(&slots->gfn_tree);
1023        }
1024
1025        if (iter->node) {
1026                iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);
1027
1028                /*
1029                 * It is possible in the slot start < range start case that the
1030                 * found slot ends before or at range start (slot end <= range start)
1031                 * and so it does not overlap the requested range.
1032                 *
1033                 * In such non-overlapping case the next slot (if it exists) will
1034                 * already have slot start > range start, otherwise the logic above
1035                 * would have found it instead of the current slot.
1036                 */
1037                if (iter->slot->base_gfn + iter->slot->npages <= start)
1038                        kvm_memslot_iter_next(iter);
1039        }
1040}
1041
1042static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
1043{
1044        if (!iter->node)
1045                return false;
1046
1047        /*
1048         * If this slot starts at or beyond the end of the range, so does
1049         * every following one.
1050         */
1051        return iter->slot->base_gfn < end;
1052}
1053
1054/* Iterate over each memslot at least partially intersecting [start, end) range */
1055#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end)      \
1056        for (kvm_memslot_iter_start(iter, slots, start);                \
1057             kvm_memslot_iter_is_valid(iter, end);                      \
1058             kvm_memslot_iter_next(iter))
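
/*
 * Illustrative sketch: using the iterator above to count the memslots that
 * at least partially overlap a [start, end) gfn range.  The helper is
 * hypothetical; real users do per-slot work inside the loop instead.
 */
static inline int example_count_memslots_in_range(struct kvm_memslots *slots,
                                                  gfn_t start, gfn_t end)
{
        struct kvm_memslot_iter iter;
        int n = 0;

        kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end)
                n++;

        return n;
}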
1059
1060/*
1061 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
1062 * - create a new memory slot
1063 * - delete an existing memory slot
1064 * - modify an existing memory slot
1065 *   -- move it in the guest physical memory space
1066 *   -- just change its flags
1067 *
1068 * Since flags can be changed by some of these operations, the following
1069 * differentiation is the best we can do for __kvm_set_memory_region():
1070 */
1071enum kvm_mr_change {
1072        KVM_MR_CREATE,
1073        KVM_MR_DELETE,
1074        KVM_MR_MOVE,
1075        KVM_MR_FLAGS_ONLY,
1076};
1077
1078int kvm_set_memory_region(struct kvm *kvm,
1079                          const struct kvm_userspace_memory_region *mem);
1080int __kvm_set_memory_region(struct kvm *kvm,
1081                            const struct kvm_userspace_memory_region *mem);
1082void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
1083void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
1084int kvm_arch_prepare_memory_region(struct kvm *kvm,
1085                                const struct kvm_memory_slot *old,
1086                                struct kvm_memory_slot *new,
1087                                enum kvm_mr_change change);
1088void kvm_arch_commit_memory_region(struct kvm *kvm,
1089                                struct kvm_memory_slot *old,
1090                                const struct kvm_memory_slot *new,
1091                                enum kvm_mr_change change);
1092/* flush all memory translations */
1093void kvm_arch_flush_shadow_all(struct kvm *kvm);
1094/* flush memory translations pointing to 'slot' */
1095void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1096                                   struct kvm_memory_slot *slot);
1097
1098int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
1099                            struct page **pages, int nr_pages);
1100
1101struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
1102unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
1103unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
1104unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
1105unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
1106                                      bool *writable);
1107void kvm_release_page_clean(struct page *page);
1108void kvm_release_page_dirty(struct page *page);
1109void kvm_set_page_accessed(struct page *page);
1110
1111kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
1112kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
1113                      bool *writable);
1114kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
1115kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
1116kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
1117                               bool atomic, bool *async, bool write_fault,
1118                               bool *writable, hva_t *hva);
1119
1120void kvm_release_pfn_clean(kvm_pfn_t pfn);
1121void kvm_release_pfn_dirty(kvm_pfn_t pfn);
1122void kvm_set_pfn_dirty(kvm_pfn_t pfn);
1123void kvm_set_pfn_accessed(kvm_pfn_t pfn);
1124
1125void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
1126int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1127                        int len);
1128int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
1129int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1130                           void *data, unsigned long len);
1131int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1132                                 void *data, unsigned int offset,
1133                                 unsigned long len);
1134int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1135                         int offset, int len);
1136int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1137                    unsigned long len);
1138int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1139                           void *data, unsigned long len);
1140int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1141                                  void *data, unsigned int offset,
1142                                  unsigned long len);
1143int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1144                              gpa_t gpa, unsigned long len);
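
/*
 * Illustrative sketch: the gfn_to_hva_cache helpers are typically used for a
 * small guest structure that is accessed repeatedly - initialize the cache
 * once for the structure's gpa, then read (or write) through it.  The
 * structure and helper below are hypothetical.
 */
struct example_shared_info {
        u32 flags;
        u32 token;
};

static inline int example_read_shared_info(struct kvm *kvm,
                                           struct gfn_to_hva_cache *ghc,
                                           gpa_t gpa,
                                           struct example_shared_info *out)
{
        int ret;

        ret = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(*out));
        if (ret)
                return ret;

        return kvm_read_guest_cached(kvm, ghc, out, sizeof(*out));
}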
1145
1146#define __kvm_get_guest(kvm, gfn, offset, v)                            \
1147({                                                                      \
1148        unsigned long __addr = gfn_to_hva(kvm, gfn);                    \
1149        typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
1150        int __ret = -EFAULT;                                            \
1151                                                                        \
1152        if (!kvm_is_error_hva(__addr))                                  \
1153                __ret = get_user(v, __uaddr);                           \
1154        __ret;                                                          \
1155})
1156
1157#define kvm_get_guest(kvm, gpa, v)                                      \
1158({                                                                      \
1159        gpa_t __gpa = gpa;                                              \
1160        struct kvm *__kvm = kvm;                                        \
1161                                                                        \
1162        __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT,                     \
1163                        offset_in_page(__gpa), v);                      \
1164})
1165
1166#define __kvm_put_guest(kvm, gfn, offset, v)                            \
1167({                                                                      \
1168        unsigned long __addr = gfn_to_hva(kvm, gfn);                    \
1169        typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
1170        int __ret = -EFAULT;                                            \
1171                                                                        \
1172        if (!kvm_is_error_hva(__addr))                                  \
1173                __ret = put_user(v, __uaddr);                           \
1174        if (!__ret)                                                     \
1175                mark_page_dirty(kvm, gfn);                              \
1176        __ret;                                                          \
1177})
1178
1179#define kvm_put_guest(kvm, gpa, v)                                      \
1180({                                                                      \
1181        gpa_t __gpa = gpa;                                              \
1182        struct kvm *__kvm = kvm;                                        \
1183                                                                        \
1184        __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT,                     \
1185                        offset_in_page(__gpa), v);                      \
1186})
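
/*
 * Illustrative sketch: reading and then writing back a single guest value
 * with kvm_get_guest()/kvm_put_guest().  The access width is inferred from
 * the type of the value passed in; the helper is hypothetical.
 */
static inline int example_bump_guest_counter(struct kvm *kvm, gpa_t gpa)
{
        u32 val;
        int ret;

        ret = kvm_get_guest(kvm, gpa, val);
        if (ret)
                return ret;

        return kvm_put_guest(kvm, gpa, val + 1);
}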
1187
1188int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
1189struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
1190bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
1191bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
1192unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
1193void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
1194void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
1195
1196struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
1197struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
1198kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
1199kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
1200int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
1201struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
1202void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
1203unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
1204unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
1205int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
1206                             int len);
1207int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
1208                               unsigned long len);
1209int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
1210                        unsigned long len);
1211int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
1212                              int offset, int len);
1213int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
1214                         unsigned long len);
1215void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
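
/*
 * Illustrative sketch (hypothetical helper): mapping a guest page with
 * kvm_vcpu_map(), touching it through the kernel mapping and releasing it
 * with kvm_vcpu_unmap().  kvm_vcpu_mapped() is the supported way to test
 * whether a kvm_host_map is valid.
 */
static inline int example_poke_guest_byte(struct kvm_vcpu *vcpu, gpa_t gpa, u8 val)
{
        struct kvm_host_map map;

        /* Callers conventionally pass the page frame number, i.e. gpa >> PAGE_SHIFT. */
        if (kvm_vcpu_map(vcpu, gpa >> PAGE_SHIFT, &map))
                return -EFAULT;

        /* kvm_vcpu_mapped(&map) is how a longer-lived map would be re-checked. */
        *((u8 *)map.hva + offset_in_page(gpa)) = val;

        /* 'true': the page was written, so mark it dirty on unmap. */
        kvm_vcpu_unmap(vcpu, &map, true);
        return 0;
}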
1216
1217/**
1218 * kvm_gfn_to_pfn_cache_init - prepare a cached kernel mapping and HPA for a
1219 *                             given guest physical address.
1220 *
1221 * @kvm:           pointer to kvm instance.
1222 * @gpc:           struct gfn_to_pfn_cache object.
1223 * @vcpu:          vCPU to be used for marking pages dirty and to be woken on
1224 *                 invalidation.
1225 * @guest_uses_pa: indicates that the resulting host physical PFN is used while
1226 *                 @vcpu is IN_GUEST_MODE so invalidations should wake it.
1227 * @kernel_map:    requests a kernel virtual mapping (kmap / memremap).
1228 * @gpa:           guest physical address to map.
1229 * @len:           sanity check; the range being accessed must fit within a single page.
1230 * @dirty:         mark the cache dirty immediately.
1231 *
1232 * @return:        0 for success.
1233 *                 -EINVAL for a mapping which would cross a page boundary.
1234 *                 -EFAULT for an untranslatable guest physical address.
1235 *
1236 * This primes a gfn_to_pfn_cache and links it into the @kvm's list for
1237 * invalidations to be processed. Invalidation callbacks to @vcpu using
1238 * %KVM_REQ_GPC_INVALIDATE will occur only for MMU notifiers, not for KVM
1239 * memslot changes. Callers are required to use kvm_gfn_to_pfn_cache_check()
1240 * to ensure that the cache is valid before accessing the target page.
1241 */
1242int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
1243                              struct kvm_vcpu *vcpu, bool guest_uses_pa,
1244                              bool kernel_map, gpa_t gpa, unsigned long len,
1245                              bool dirty);
1246
1247/**
1248 * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
1249 *
1250 * @kvm:           pointer to kvm instance.
1251 * @gpc:           struct gfn_to_pfn_cache object.
1252 * @gpa:           current guest physical address to map.
1253 * @len:           sanity check; the range being accessed must fit within a single page.
1254 * @dirty:         mark the cache dirty immediately.
1255 *
1256 * @return:        %true if the cache is still valid and the address matches.
1257 *                 %false if the cache is not valid.
1258 *
1259 * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
1260 * while calling this function, and then continue to hold the lock until the
1261 * access is complete.
1262 *
1263 * Callers in IN_GUEST_MODE may do so without locking, although they should
1264 * still hold a read lock on kvm->srcu for the memslot checks.
1265 */
1266bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
1267                                gpa_t gpa, unsigned long len);
1268
1269/**
1270 * kvm_gfn_to_pfn_cache_refresh - update a previously initialized cache.
1271 *
1272 * @kvm:           pointer to kvm instance.
1273 * @gpc:           struct gfn_to_pfn_cache object.
1274 * @gpa:           updated guest physical address to map.
1275 * @len:           sanity check; the range being accessed must fit within a single page.
1276 * @dirty:         mark the cache dirty immediately.
1277 *
1278 * @return:        0 for success.
1279 *                 -EINVAL for a mapping which would cross a page boundary.
1280 *                 -EFAULT for an untranslatable guest physical address.
1281 *
1282 * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
1283 * return from this function does not mean the page can be immediately
1284 * accessed because it may have raced with an invalidation. Callers must
1285 * still lock and check the cache status, as this function does not return
1286 * with the lock still held to permit access.
1287 */
1288int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
1289                                 gpa_t gpa, unsigned long len, bool dirty);
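
/*
 * Illustrative sketch of the check/refresh pattern described by the
 * kernel-doc above: validate the cache under its read lock and refresh it
 * outside the lock when validation fails.  Field names follow the
 * gfn_to_pfn_cache definition in kvm_types.h; treat this as a sketch rather
 * than a reference implementation.
 */
static inline int example_gpc_access(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                                     gpa_t gpa, unsigned long len)
{
        read_lock(&gpc->lock);
        while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpa, len)) {
                read_unlock(&gpc->lock);

                if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len, false))
                        return -EFAULT;

                read_lock(&gpc->lock);
        }

        /* ... access the page through the cache's kernel mapping here ... */

        read_unlock(&gpc->lock);
        return 0;
}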
1290
1291/**
1292 * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache.
1293 *
1294 * @kvm:           pointer to kvm instance.
1295 * @gpc:           struct gfn_to_pfn_cache object.
1296 *
1297 * This unmaps the referenced page and marks it dirty, if appropriate. The
1298 * cache is left in the invalid state but at least the mapping from GPA to
1299 * userspace HVA will remain cached and can be reused on a subsequent
1300 * refresh.
1301 */
1302void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
1303
1304/**
1305 * kvm_gfn_to_pfn_cache_destroy - destroy and unlink a gfn_to_pfn_cache.
1306 *
1307 * @kvm:           pointer to kvm instance.
1308 * @gpc:           struct gfn_to_pfn_cache object.
1309 *
1310 * This removes a cache from the @kvm's list to be processed on MMU notifier
1311 * invocation.
1312 */
1313void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
1314
1315void kvm_sigset_activate(struct kvm_vcpu *vcpu);
1316void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
1317
1318void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
1319bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
1320void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
1321void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
1322bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
1323void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
1324int kvm_vcpu_yield_to(struct kvm_vcpu *target);
1325void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
1326
1327void kvm_flush_remote_tlbs(struct kvm *kvm);
1328void kvm_reload_remote_mmus(struct kvm *kvm);
1329
1330#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
1331int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
1332int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
1333void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
1334void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
1335#endif
1336
1337void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
1338                                   unsigned long end);
1339void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
1340                                   unsigned long end);
1341
1342long kvm_arch_dev_ioctl(struct file *filp,
1343                        unsigned int ioctl, unsigned long arg);
1344long kvm_arch_vcpu_ioctl(struct file *filp,
1345                         unsigned int ioctl, unsigned long arg);
1346vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
1347
1348int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
1349
1350void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1351                                        struct kvm_memory_slot *slot,
1352                                        gfn_t gfn_offset,
1353                                        unsigned long mask);
1354void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
1355
1356#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
1357void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
1358                                        const struct kvm_memory_slot *memslot);
1359#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
1360int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
1361int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
1362                      int *is_dirty, struct kvm_memory_slot **memslot);
1363#endif
1364
1365int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
1366                        bool line_status);
1367int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
1368                            struct kvm_enable_cap *cap);
1369long kvm_arch_vm_ioctl(struct file *filp,
1370                       unsigned int ioctl, unsigned long arg);
1371
1372int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
1373int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
1374
1375int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1376                                    struct kvm_translation *tr);
1377
1378int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
1379int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
1380int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1381                                  struct kvm_sregs *sregs);
1382int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1383                                  struct kvm_sregs *sregs);
1384int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1385                                    struct kvm_mp_state *mp_state);
1386int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1387                                    struct kvm_mp_state *mp_state);
1388int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1389                                        struct kvm_guest_debug *dbg);
1390int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
1391
1392int kvm_arch_init(void *opaque);
1393void kvm_arch_exit(void);
1394
1395void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
1396
1397void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
1398void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
1399int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
1400int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
1401void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
1402void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
1403
1404#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
1405int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
1406#endif
1407
1408#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
1409void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
1410#endif
1411
1412int kvm_arch_hardware_enable(void);
1413void kvm_arch_hardware_disable(void);
1414int kvm_arch_hardware_setup(void *opaque);
1415void kvm_arch_hardware_unsetup(void);
1416int kvm_arch_check_processor_compat(void *opaque);
1417int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
1418bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
1419int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
1420bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
1421bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
1422int kvm_arch_post_init_vm(struct kvm *kvm);
1423void kvm_arch_pre_destroy_vm(struct kvm *kvm);
1424int kvm_arch_create_vm_debugfs(struct kvm *kvm);
1425
1426#ifndef __KVM_HAVE_ARCH_VM_ALLOC
1427/*
1428 * All architectures that want to use vzalloc currently also
1429 * need their own kvm_arch_alloc_vm implementation.
1430 */
1431static inline struct kvm *kvm_arch_alloc_vm(void)
1432{
1433        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
1434}
1435#endif
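/*
 * Illustrative sketch (not part of this header): per the comment above, an
 * architecture that wants its struct kvm backed by vzalloc() would define
 * __KVM_HAVE_ARCH_VM_ALLOC and provide its own kvm_arch_alloc_vm(), e.g. in
 * its asm/kvm_host.h.  The matching __kvm_arch_free_vm() below uses kvfree(),
 * so it copes with either kzalloc()- or vzalloc()-backed allocations.
 *
 *	static inline struct kvm *kvm_arch_alloc_vm(void)
 *	{
 *		return vzalloc(sizeof(struct kvm));
 *	}
 */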
1436
1437static inline void __kvm_arch_free_vm(struct kvm *kvm)
1438{
1439        kvfree(kvm);
1440}
1441
1442#ifndef __KVM_HAVE_ARCH_VM_FREE
1443static inline void kvm_arch_free_vm(struct kvm *kvm)
1444{
1445        __kvm_arch_free_vm(kvm);
1446}
1447#endif
1448
1449#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
1450static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
1451{
1452        return -ENOTSUPP;
1453}
1454#endif
1455
1456#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
1457void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
1458void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
1459bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
1460#else
1461static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
1462{
1463}
1464
1465static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
1466{
1467}
1468
1469static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
1470{
1471        return false;
1472}
1473#endif
1474#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
1475void kvm_arch_start_assignment(struct kvm *kvm);
1476void kvm_arch_end_assignment(struct kvm *kvm);
1477bool kvm_arch_has_assigned_device(struct kvm *kvm);
1478#else
1479static inline void kvm_arch_start_assignment(struct kvm *kvm)
1480{
1481}
1482
1483static inline void kvm_arch_end_assignment(struct kvm *kvm)
1484{
1485}
1486
1487static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
1488{
1489        return false;
1490}
1491#endif
1492
1493static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
1494{
1495#ifdef __KVM_HAVE_ARCH_WQP
1496        return vcpu->arch.waitp;
1497#else
1498        return &vcpu->wait;
1499#endif
1500}
1501
1502/*
1503 * Wake a vCPU if necessary, but don't do any stats/metadata updates.  Returns
1504 * true if the vCPU was blocking and was awakened, false otherwise.
1505 */
1506static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
1507{
1508        return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
1509}
1510
1511static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
1512{
1513        return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
1514}
1515
1516#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
1517/*
1518 * Returns true if the virtual interrupt controller is initialized and
1519 * ready to accept virtual IRQs. On some architectures the virtual interrupt
1520 * controller is dynamically instantiated, so this is not always true.
1521 */
1522bool kvm_arch_intc_initialized(struct kvm *kvm);
1523#else
1524static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
1525{
1526        return true;
1527}
1528#endif
1529
1530#ifdef CONFIG_GUEST_PERF_EVENTS
1531unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
1532
1533void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
1534void kvm_unregister_perf_callbacks(void);
1535#else
1536static inline void kvm_register_perf_callbacks(void *ign) {}
1537static inline void kvm_unregister_perf_callbacks(void) {}
1538#endif /* CONFIG_GUEST_PERF_EVENTS */
1539
1540int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
1541void kvm_arch_destroy_vm(struct kvm *kvm);
1542void kvm_arch_sync_events(struct kvm *kvm);
1543
1544int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
1545
1546bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
1547bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
1548
1549struct kvm_irq_ack_notifier {
1550        struct hlist_node link;
1551        unsigned gsi;
1552        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
1553};
1554
1555int kvm_irq_map_gsi(struct kvm *kvm,
1556                    struct kvm_kernel_irq_routing_entry *entries, int gsi);
1557int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
1558
1559int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1560                bool line_status);
1561int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
1562                int irq_source_id, int level, bool line_status);
1563int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
1564                               struct kvm *kvm, int irq_source_id,
1565                               int level, bool line_status);
1566bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
1567void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
1568void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
1569void kvm_register_irq_ack_notifier(struct kvm *kvm,
1570                                   struct kvm_irq_ack_notifier *kian);
1571void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
1572                                   struct kvm_irq_ack_notifier *kian);
1573int kvm_request_irq_source_id(struct kvm *kvm);
1574void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
1575bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
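/*
 * Illustrative sketch (not part of this header): a device model that needs to
 * know when the guest ACKs a given GSI can hook a kvm_irq_ack_notifier.  The
 * GSI value and the names below are hypothetical.
 *
 *	static void my_irq_acked(struct kvm_irq_ack_notifier *kian)
 *	{
 *		// called when the guest acknowledges the interrupt
 *	}
 *
 *	static struct kvm_irq_ack_notifier my_kian = {
 *		.gsi = 5,
 *		.irq_acked = my_irq_acked,
 *	};
 *
 *	kvm_register_irq_ack_notifier(kvm, &my_kian);
 */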
1576
1577/*
1578 * Returns a pointer to the memslot if it contains gfn.
1579 * Otherwise returns NULL.
1580 */
1581static inline struct kvm_memory_slot *
1582try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
1583{
1584        if (!slot)
1585                return NULL;
1586
1587        if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
1588                return slot;
1589        else
1590                return NULL;
1591}
1592
1593/*
1594 * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL.
1595 *
1596 * With "approx" set, a memslot is returned even when the gfn falls in a
1597 * hole. In that case, one of the memslots bordering the hole is
1598 * returned.
1599 */
1600static inline struct kvm_memory_slot *
1601search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
1602{
1603        struct kvm_memory_slot *slot;
1604        struct rb_node *node;
1605        int idx = slots->node_idx;
1606
1607        slot = NULL;
1608        for (node = slots->gfn_tree.rb_node; node; ) {
1609                slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
1610                if (gfn >= slot->base_gfn) {
1611                        if (gfn < slot->base_gfn + slot->npages)
1612                                return slot;
1613                        node = node->rb_right;
1614                } else
1615                        node = node->rb_left;
1616        }
1617
1618        return approx ? slot : NULL;
1619}
1620
1621static inline struct kvm_memory_slot *
1622____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
1623{
1624        struct kvm_memory_slot *slot;
1625
1626        slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
1627        slot = try_get_memslot(slot, gfn);
1628        if (slot)
1629                return slot;
1630
1631        slot = search_memslots(slots, gfn, approx);
1632        if (slot) {
1633                atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
1634                return slot;
1635        }
1636
1637        return NULL;
1638}
1639
1640/*
1641 * __gfn_to_memslot() and its descendants are here to allow arch code to inline
1642 * the lookups in hot paths.  gfn_to_memslot() itself isn't here as an inline
1643 * because that would bloat other code too much.
1644 */
1645static inline struct kvm_memory_slot *
1646__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
1647{
1648        return ____gfn_to_memslot(slots, gfn, false);
1649}
1650
1651static inline unsigned long
1652__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
1653{
1654        /*
1655         * The index was originally checked in search_memslots.  To prevent
1656         * a malicious guest from building a Spectre gadget out of e.g. page
1657         * table walks, do not let the processor speculate loads outside
1658         * the guest's registered memslots.
1659         */
1660        unsigned long offset = gfn - slot->base_gfn;
1661        offset = array_index_nospec(offset, slot->npages);
1662        return slot->userspace_addr + offset * PAGE_SIZE;
1663}
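/*
 * Illustrative sketch (not part of this header): a typical inline lookup in
 * arch hot paths resolves a gfn to a memslot and then to a host virtual
 * address, using kvm_memslots() declared elsewhere in this header.
 *
 *	struct kvm_memory_slot *slot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
 *	unsigned long hva;
 *
 *	if (slot) {
 *		hva = __gfn_to_hva_memslot(slot, gfn);
 *		// use hva for host-side access to the guest page
 *	}
 */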
1664
1665static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
1666{
1667        return gfn_to_memslot(kvm, gfn)->id;
1668}
1669
1670static inline gfn_t
1671hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
1672{
1673        gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
1674
1675        return slot->base_gfn + gfn_offset;
1676}
1677
1678static inline gpa_t gfn_to_gpa(gfn_t gfn)
1679{
1680        return (gpa_t)gfn << PAGE_SHIFT;
1681}
1682
1683static inline gfn_t gpa_to_gfn(gpa_t gpa)
1684{
1685        return (gfn_t)(gpa >> PAGE_SHIFT);
1686}
1687
1688static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
1689{
1690        return (hpa_t)pfn << PAGE_SHIFT;
1691}
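/*
 * Worked example of the conversions above, assuming PAGE_SHIFT == 12 (4KiB
 * pages): gpa_to_gfn(0x12345678) yields gfn 0x12345, and gfn_to_gpa(0x12345)
 * yields gpa 0x12345000, i.e. the page-aligned base of the original address.
 */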
1692
1693static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
1694                                                gpa_t gpa)
1695{
1696        return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
1697}
1698
1699static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
1700{
1701        unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
1702
1703        return kvm_is_error_hva(hva);
1704}
1705
1706enum kvm_stat_kind {
1707        KVM_STAT_VM,
1708        KVM_STAT_VCPU,
1709};
1710
1711struct kvm_stat_data {
1712        struct kvm *kvm;
1713        const struct _kvm_stats_desc *desc;
1714        enum kvm_stat_kind kind;
1715};
1716
1717struct _kvm_stats_desc {
1718        struct kvm_stats_desc desc;
1719        char name[KVM_STATS_NAME_SIZE];
1720};
1721
1722#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz)                      \
1723        .flags = type | unit | base |                                          \
1724                 BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) |              \
1725                 BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) |              \
1726                 BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK),               \
1727        .exponent = exp,                                                       \
1728        .size = sz,                                                            \
1729        .bucket_size = bsz
1730
1731#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz)            \
1732        {                                                                      \
1733                {                                                              \
1734                        STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
1735                        .offset = offsetof(struct kvm_vm_stat, generic.stat)   \
1736                },                                                             \
1737                .name = #stat,                                                 \
1738        }
1739#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz)          \
1740        {                                                                      \
1741                {                                                              \
1742                        STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
1743                        .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
1744                },                                                             \
1745                .name = #stat,                                                 \
1746        }
1747#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz)                    \
1748        {                                                                      \
1749                {                                                              \
1750                        STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
1751                        .offset = offsetof(struct kvm_vm_stat, stat)           \
1752                },                                                             \
1753                .name = #stat,                                                 \
1754        }
1755#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz)                  \
1756        {                                                                      \
1757                {                                                              \
1758                        STATS_DESC_COMMON(type, unit, base, exp, sz, bsz),     \
1759                        .offset = offsetof(struct kvm_vcpu_stat, stat)         \
1760                },                                                             \
1761                .name = #stat,                                                 \
1762        }
1763/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
1764#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz)                \
1765        SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)
1766
1767#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent)               \
1768        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE,                     \
1769                unit, base, exponent, 1, 0)
1770#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent)                  \
1771        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT,                        \
1772                unit, base, exponent, 1, 0)
1773#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent)                     \
1774        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK,                           \
1775                unit, base, exponent, 1, 0)
1776#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz)     \
1777        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST,                    \
1778                unit, base, exponent, sz, bsz)
1779#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz)             \
1780        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST,                       \
1781                unit, base, exponent, sz, 0)
1782
1783/* Cumulative counter, read/write */
1784#define STATS_DESC_COUNTER(SCOPE, name)                                        \
1785        STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE,                \
1786                KVM_STATS_BASE_POW10, 0)
1787/* Instantaneous counter, read only */
1788#define STATS_DESC_ICOUNTER(SCOPE, name)                                       \
1789        STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE,                   \
1790                KVM_STATS_BASE_POW10, 0)
1791/* Peak counter, read/write */
1792#define STATS_DESC_PCOUNTER(SCOPE, name)                                       \
1793        STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE,                      \
1794                KVM_STATS_BASE_POW10, 0)
1795
1796/* Cumulative time in nanosecond */
1797#define STATS_DESC_TIME_NSEC(SCOPE, name)                                      \
1798        STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS,             \
1799                KVM_STATS_BASE_POW10, -9)
1800/* Linear histogram for time in nanosecond */
1801#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz)                     \
1802        STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS,            \
1803                KVM_STATS_BASE_POW10, -9, sz, bsz)
1804/* Logarithmic histogram for time in nanosecond */
1805#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz)                          \
1806        STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS,               \
1807                KVM_STATS_BASE_POW10, -9, sz)
1808
1809#define KVM_GENERIC_VM_STATS()                                                 \
1810        STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush),                      \
1811        STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)
1812
1813#define KVM_GENERIC_VCPU_STATS()                                               \
1814        STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll),                \
1815        STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll),                 \
1816        STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid),                   \
1817        STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup),                         \
1818        STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns),              \
1819        STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns),                 \
1820        STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns),                      \
1821        STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist,     \
1822                        HALT_POLL_HIST_COUNT),                                 \
1823        STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist,        \
1824                        HALT_POLL_HIST_COUNT),                                 \
1825        STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist,             \
1826                        HALT_POLL_HIST_COUNT),                                 \
1827        STATS_DESC_ICOUNTER(VCPU_GENERIC, blocking)
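/*
 * Illustrative sketch (not part of this header): an architecture describes its
 * stats by building kvm_vcpu_stats_desc[] (declared below) from these macros.
 * The "exits" field here is hypothetical and would have to exist in that
 * architecture's struct kvm_vcpu_stat.
 *
 *	const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 *		KVM_GENERIC_VCPU_STATS(),
 *		STATS_DESC_COUNTER(VCPU, exits),
 *	};
 */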
1828
1829extern struct dentry *kvm_debugfs_dir;
1830
1831ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
1832                       const struct _kvm_stats_desc *desc,
1833                       void *stats, size_t size_stats,
1834                       char __user *user_buffer, size_t size, loff_t *offset);
1835
1836/**
1837 * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
1838 * statistics data.
1839 *
1840 * @data: start address of the stats data
1841 * @size: the number of buckets in the stats data
1842 * @value: the new value used to update the linear histogram's bucket
1843 * @bucket_size: the size (width) of a bucket
1844 */
1845static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
1846                                                u64 value, size_t bucket_size)
1847{
1848        size_t index = div64_u64(value, bucket_size);
1849
1850        index = min(index, size - 1);
1851        ++data[index];
1852}
1853
1854/**
1855 * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
1856 * statistics data.
1857 *
1858 * @data: start address of the stats data
1859 * @size: the number of buckets in the stats data
1860 * @value: the new value used to update the logarithmic histogram's bucket
1861 */
1862static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
1863{
1864        size_t index = fls64(value);
1865
1866        index = min(index, size - 1);
1867        ++data[index];
1868}
1869
1870#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize)                      \
1871        kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
1872#define KVM_STATS_LOG_HIST_UPDATE(array, value)                                \
1873        kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)
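/*
 * Illustrative sketch (not part of this header): updating a histogram stat.
 * For a logarithmic histogram the bucket index is fls64(value), so e.g. a
 * halt poll that took "ns" nanoseconds could be accounted with:
 *
 *	KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_poll_success_hist, ns);
 *
 * where the generic halt_poll_success_hist array has HALT_POLL_HIST_COUNT
 * buckets, as described by KVM_GENERIC_VCPU_STATS() above.
 */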
1874
1875
1876extern const struct kvm_stats_header kvm_vm_stats_header;
1877extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
1878extern const struct kvm_stats_header kvm_vcpu_stats_header;
1879extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
1880
1881#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1882static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
1883{
1884        if (unlikely(kvm->mmu_notifier_count))
1885                return 1;
1886        /*
1887         * Ensure the read of mmu_notifier_count happens before the read
1888         * of mmu_notifier_seq.  This interacts with the smp_wmb() in
1889         * mmu_notifier_invalidate_range_end to make sure that the caller
1890         * either sees the old (non-zero) value of mmu_notifier_count or
1891         * the new (incremented) value of mmu_notifier_seq.
1892         * PowerPC Book3s HV KVM calls this under a per-page lock
1893         * rather than under kvm->mmu_lock, for scalability, so
1894         * can't rely on kvm->mmu_lock to keep things ordered.
1895         */
1896        smp_rmb();
1897        if (kvm->mmu_notifier_seq != mmu_seq)
1898                return 1;
1899        return 0;
1900}
1901
1902static inline int mmu_notifier_retry_hva(struct kvm *kvm,
1903                                         unsigned long mmu_seq,
1904                                         unsigned long hva)
1905{
1906        lockdep_assert_held(&kvm->mmu_lock);
1907        /*
1908         * If mmu_notifier_count is non-zero, then the range maintained by
1909         * kvm_mmu_notifier_invalidate_range_start contains all addresses that
1910         * might be being invalidated. Note that it may include some false
1911         * positives, due to shortcuts when handling concurrent invalidations.
1912         */
1913        if (unlikely(kvm->mmu_notifier_count) &&
1914            hva >= kvm->mmu_notifier_range_start &&
1915            hva < kvm->mmu_notifier_range_end)
1916                return 1;
1917        if (kvm->mmu_notifier_seq != mmu_seq)
1918                return 1;
1919        return 0;
1920}
1921#endif
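/*
 * Illustrative sketch (not part of this header) of the usual page-fault
 * pattern these helpers support: snapshot mmu_notifier_seq, resolve the pfn
 * outside mmu_lock, then re-check under mmu_lock before installing anything.
 * Whether mmu_lock is a spinlock or rwlock depends on the architecture.
 *
 *	unsigned long mmu_seq = kvm->mmu_notifier_seq;
 *	kvm_pfn_t pfn;
 *
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto out_unlock;	// drop the pfn and retry the fault
 *	// ... install the new mapping ...
 * out_unlock:
 *	spin_unlock(&kvm->mmu_lock);
 */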
1922
1923#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
1924
1925#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
1926
1927bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
1928int kvm_set_irq_routing(struct kvm *kvm,
1929                        const struct kvm_irq_routing_entry *entries,
1930                        unsigned nr,
1931                        unsigned flags);
1932int kvm_set_routing_entry(struct kvm *kvm,
1933                          struct kvm_kernel_irq_routing_entry *e,
1934                          const struct kvm_irq_routing_entry *ue);
1935void kvm_free_irq_routing(struct kvm *kvm);
1936
1937#else
1938
1939static inline void kvm_free_irq_routing(struct kvm *kvm) {}
1940
1941#endif
1942
1943int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
1944
1945#ifdef CONFIG_HAVE_KVM_EVENTFD
1946
1947void kvm_eventfd_init(struct kvm *kvm);
1948int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
1949
1950#ifdef CONFIG_HAVE_KVM_IRQFD
1951int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
1952void kvm_irqfd_release(struct kvm *kvm);
1953void kvm_irq_routing_update(struct kvm *);
1954#else
1955static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
1956{
1957        return -EINVAL;
1958}
1959
1960static inline void kvm_irqfd_release(struct kvm *kvm) {}
1961#endif
1962
1963#else
1964
1965static inline void kvm_eventfd_init(struct kvm *kvm) {}
1966
1967static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
1968{
1969        return -EINVAL;
1970}
1971
1972static inline void kvm_irqfd_release(struct kvm *kvm) {}
1973
1974#ifdef CONFIG_HAVE_KVM_IRQCHIP
1975static inline void kvm_irq_routing_update(struct kvm *kvm)
1976{
1977}
1978#endif
1979
1980static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
1981{
1982        return -ENOSYS;
1983}
1984
1985#endif /* CONFIG_HAVE_KVM_EVENTFD */
1986
1987void kvm_arch_irq_routing_update(struct kvm *kvm);
1988
1989static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
1990{
1991        /*
1992         * Ensure the rest of the request is published to kvm_check_request's
1993         * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
1994         */
1995        smp_wmb();
1996        set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
1997}
1998
1999static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
2000{
2001        return READ_ONCE(vcpu->requests);
2002}
2003
2004static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
2005{
2006        return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
2007}
2008
2009static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
2010{
2011        clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
2012}
2013
2014static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
2015{
2016        if (kvm_test_request(req, vcpu)) {
2017                kvm_clear_request(req, vcpu);
2018
2019                /*
2020                 * Ensure the rest of the request is visible to kvm_check_request's
2021                 * caller.  Paired with the smp_wmb in kvm_make_request.
2022                 */
2023                smp_mb__after_atomic();
2024                return true;
2025        } else {
2026                return false;
2027        }
2028}
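/*
 * Illustrative sketch (not part of this header) of the request protocol: the
 * producer publishes its data, sets the request bit and kicks the vCPU; the
 * vCPU run loop consumes requests before entering the guest.  KVM_REQ_TLB_FLUSH
 * and kvm_vcpu_kick() are declared earlier in this header.
 *
 *	// producer side:
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	// consumer side, in the arch vcpu run loop:
 *	if (kvm_request_pending(vcpu)) {
 *		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *			;	// do the arch-specific TLB flush here
 *	}
 */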
2029
2030extern bool kvm_rebooting;
2031
2032extern unsigned int halt_poll_ns;
2033extern unsigned int halt_poll_ns_grow;
2034extern unsigned int halt_poll_ns_grow_start;
2035extern unsigned int halt_poll_ns_shrink;
2036
2037struct kvm_device {
2038        const struct kvm_device_ops *ops;
2039        struct kvm *kvm;
2040        void *private;
2041        struct list_head vm_node;
2042};
2043
2044/* create, destroy, and name are mandatory */
2045struct kvm_device_ops {
2046        const char *name;
2047
2048        /*
2049         * create is called holding kvm->lock; any operations not suitable
2050         * to do while holding the lock should be deferred to init (see
2051         * below).
2052         */
2053        int (*create)(struct kvm_device *dev, u32 type);
2054
2055        /*
2056         * init is called after a successful create, and is called without
2057         * holding kvm->lock.
2058         */
2059        void (*init)(struct kvm_device *dev);
2060
2061        /*
2062         * Destroy is responsible for freeing dev.
2063         *
2064         * Destroy may be called before or after destructors are called
2065         * on emulated I/O regions, depending on whether a reference is
2066         * held by a vcpu or other kvm component that gets destroyed
2067         * after the emulated I/O.
2068         */
2069        void (*destroy)(struct kvm_device *dev);
2070
2071        /*
2072         * Release is an alternative method to free the device. It is
2073         * called when the device file descriptor is closed. Once
2074         * release is called, the destroy method will not be called
2075         * anymore as the device is removed from the device list of
2076         * the VM. kvm->lock is held.
2077         */
2078        void (*release)(struct kvm_device *dev);
2079
2080        int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
2081        int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
2082        int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
2083        long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
2084                      unsigned long arg);
2085        int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
2086};
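/*
 * Illustrative sketch (not part of this header): a minimal device backend.
 * Only create, destroy and name are mandatory, and destroy must free dev
 * itself.  The ops name and the device type constant below are hypothetical.
 *
 *	static int my_dev_create(struct kvm_device *dev, u32 type)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_dev_destroy(struct kvm_device *dev)
 *	{
 *		kfree(dev);
 *	}
 *
 *	static const struct kvm_device_ops my_dev_ops = {
 *		.name = "my-dev",
 *		.create = my_dev_create,
 *		.destroy = my_dev_destroy,
 *	};
 *
 *	// registered once, e.g. from module or arch init code:
 *	kvm_register_device_ops(&my_dev_ops, MY_DEV_TYPE);
 */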
2087
2088void kvm_device_get(struct kvm_device *dev);
2089void kvm_device_put(struct kvm_device *dev);
2090struct kvm_device *kvm_device_from_filp(struct file *filp);
2091int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
2092void kvm_unregister_device_ops(u32 type);
2093
2094extern struct kvm_device_ops kvm_mpic_ops;
2095extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
2096extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
2097
2098#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
2099
2100static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
2101{
2102        vcpu->spin_loop.in_spin_loop = val;
2103}
2104static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
2105{
2106        vcpu->spin_loop.dy_eligible = val;
2107}
2108
2109#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
2110
2111static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
2112{
2113}
2114
2115static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
2116{
2117}
2118#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
2119
2120static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
2121{
2122        return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
2123                !(memslot->flags & KVM_MEMSLOT_INVALID));
2124}
2125
2126struct kvm_vcpu *kvm_get_running_vcpu(void);
2127struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
2128
2129#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
2130bool kvm_arch_has_irq_bypass(void);
2131int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
2132                           struct irq_bypass_producer *);
2133void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
2134                           struct irq_bypass_producer *);
2135void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
2136void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
2137int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
2138                                  uint32_t guest_irq, bool set);
2139bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
2140                                  struct kvm_kernel_irq_routing_entry *);
2141#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
2142
2143#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
2144/* If we wake up during the poll time, was it a successful poll? */
2145static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
2146{
2147        return vcpu->valid_wakeup;
2148}
2149
2150#else
2151static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
2152{
2153        return true;
2154}
2155#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
2156
2157#ifdef CONFIG_HAVE_KVM_NO_POLL
2158/* Callback that tells whether we must not poll */
2159bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
2160#else
2161static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
2162{
2163        return false;
2164}
2165#endif /* CONFIG_HAVE_KVM_NO_POLL */
2166
2167#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
2168long kvm_arch_vcpu_async_ioctl(struct file *filp,
2169                               unsigned int ioctl, unsigned long arg);
2170#else
2171static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
2172                                             unsigned int ioctl,
2173                                             unsigned long arg)
2174{
2175        return -ENOIOCTLCMD;
2176}
2177#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
2178
2179void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
2180                                            unsigned long start, unsigned long end);
2181
2182#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
2183int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
2184#else
2185static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
2186{
2187        return 0;
2188}
2189#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
2190
2191typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
2192
2193int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
2194                                uintptr_t data, const char *name,
2195                                struct task_struct **thread_ptr);
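/*
 * Illustrative sketch (not part of this header): spawning a per-VM worker.
 * The function and thread names are hypothetical; the worker runs until it
 * returns or its kthread is stopped.
 *
 *	static int my_worker_fn(struct kvm *kvm, uintptr_t data)
 *	{
 *		// periodic housekeeping for this VM
 *		return 0;
 *	}
 *
 *	struct task_struct *thread;
 *	int r = kvm_vm_create_worker_thread(kvm, my_worker_fn, 0, "my-worker",
 *					    &thread);
 */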
2196
2197#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
2198static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
2199{
2200        vcpu->run->exit_reason = KVM_EXIT_INTR;
2201        vcpu->stat.signal_exits++;
2202}
2203#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
2204
2205/*
2206 * This defines how many reserved entries we want to keep before we
2207 * kick the vcpu out to userspace to avoid a full dirty ring.  This
2208 * value can be tuned higher if e.g. PML is enabled on the host.
2209 */
2210#define  KVM_DIRTY_RING_RSVD_ENTRIES  64
2211
2212/* Max number of entries allowed for each kvm dirty ring */
2213#define  KVM_DIRTY_RING_MAX_ENTRIES  65536
2214
2215#endif
2216