linux/include/linux/kvm_host.h
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_KVMCLOCK_UPDATE    8
#define KVM_REQ_KICK               9
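
/*
 * Illustrative sketch (not part of this header): a request bit is raised
 * with set_bit() on vcpu->requests and consumed with test_and_clear_bit()
 * in the vcpu run loop, e.g.:
 *
 *	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);	// producer side
 *	...
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		flush_guest_tlb(vcpu);	// flush_guest_tlb() is hypothetical
 *
 * kvm_migrate_timers() further down is a real in-header example of the
 * producer side.
 */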

#define KVM_USERSPACE_IRQ_SOURCE_ID     0

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
        int                   dev_count;
#define NR_IOBUS_DEVS 6
        struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, int len,
                     const void *val);
int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len,
                    void *val);
int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
                              struct kvm_io_device *dev);
int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
                            struct kvm_io_device *dev);
void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
                                 struct kvm_io_device *dev);
void kvm_io_bus_unregister_dev(struct kvm *kvm, struct kvm_io_bus *bus,
                               struct kvm_io_device *dev);
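
/*
 * Illustrative sketch (assumed implementation, not part of this header):
 * the linear search mentioned above means kvm_io_bus_write() boils down
 * to probing each registered device until one claims the access, roughly:
 *
 *	int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, int len,
 *			     const void *val)
 *	{
 *		int i;
 *
 *		for (i = 0; i < bus->dev_count; i++)
 *			if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
 *				return 0;	// device accepted the access
 *		return -EOPNOTSUPP;
 *	}
 */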

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int vcpu_id;
        struct mutex mutex;
        int   cpu;
        struct kvm_run *run;
        unsigned long requests;
        unsigned long guest_debug;
        int fpu_active;
        int guest_fpu_loaded;
        wait_queue_head_t wq;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_size;
        unsigned char mmio_data[8];
        gpa_t mmio_phys_addr;
#endif

        struct kvm_vcpu_arch arch;
};

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long flags;
        unsigned long *rmap;
        unsigned long *dirty_bitmap;
        struct {
                unsigned long rmap_pde;
                int write_count;
        } *lpage_info[KVM_NR_PAGE_SIZES - 1];
        unsigned long userspace_addr;
        int user_alloc;
};
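
/*
 * Illustrative sketch (assumed, not part of this header): dirty_bitmap
 * holds one bit per page in the slot, indexed by the gfn's offset from
 * base_gfn.  mark_page_dirty() can therefore be pictured as:
 *
 *	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
 *
 *	if (slot && slot->dirty_bitmap) {
 *		unsigned long rel_gfn = gfn - slot->base_gfn;
 *		set_bit(rel_gfn, slot->dirty_bitmap);
 *	}
 */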

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int level);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct msi_msg msi;
        };
        struct list_head link;
};
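
/*
 * Illustrative sketch (assumed, not part of this header): kvm_set_irq()
 * can be pictured as walking kvm->irq_routing and invoking the ->set
 * callback of every entry programmed for the requested GSI:
 *
 *	struct kvm_kernel_irq_routing_entry *e;
 *
 *	list_for_each_entry(e, &kvm->irq_routing, link)
 *		if (e->gsi == irq)
 *			e->set(e, kvm, level);
 */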

struct kvm {
        spinlock_t mmu_lock;
        spinlock_t requests_lock;
        struct rw_semaphore slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        int nmemslots;
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
                                        KVM_PRIVATE_MEM_SLOTS];
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
        struct kvm_vcpu *bsp_vcpu;
#endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus mmio_bus;
        struct kvm_io_bus pio_bus;
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t        lock;
                struct list_head  items;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */
        struct hlist_head mask_notifier_list;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)                                       \
 do {                                                                   \
        if (printk_ratelimit())                                         \
                printk(KERN_ERR "kvm: %i: cpu%i " fmt,                  \
                       current->tgid, (vcpu)->vcpu_id, ## __VA_ARGS__); \
 } while (0)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
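
/*
 * Illustrative usage (hypothetical call site, not part of this header):
 *
 *	pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
 *
 * prints a rate-limited KERN_ERR line tagged with the current process
 * and the offending vcpu id.
 */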

/*
 * The smp_rmb() pairs with the write barrier on the vcpu creation path,
 * which publishes the vcpus[] entry before incrementing online_vcpus,
 * so readers see a fully initialised pointer.
 */
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \
             idx < atomic_read(&kvm->online_vcpus) && vcpup; \
             vcpup = kvm_get_vcpu(kvm, ++idx))
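
/*
 * Illustrative usage (hypothetical, not part of this header):
 *
 *	int i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);	// kick every online vcpu
 */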

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned int vcpu_size,
             struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

extern struct page *bad_page;
extern pfn_t bad_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc);
int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
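
/*
 * Illustrative usage (hypothetical, not part of this header): copying a
 * guest-physical structure in and out with the accessors above:
 *
 *	struct my_shared_page s;	// my_shared_page is made up
 *
 *	if (kvm_read_guest(kvm, gpa, &s, sizeof(s)))
 *		return -EFAULT;
 *	s.flags |= 1;
 *	if (kvm_write_guest(kvm, gpa, &s, sizeof(s)))
 *		return -EFAULT;
 *
 * Both return 0 on success and a negative value if the gpa range is not
 * backed by a memslot.
 */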

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

#define KVM_ASSIGNED_MSIX_PENDING               0x1
struct kvm_guest_msix_entry {
        u32 vector;
        u16 entry;
        u16 flags;
};

struct kvm_assigned_dev_kernel {
        struct kvm_irq_ack_notifier ack_notifier;
        struct work_struct interrupt_work;
        struct list_head list;
        int assigned_dev_id;
        int host_busnr;
        int host_devfn;
        unsigned int entries_nr;
        int host_irq;
        bool host_irq_disabled;
        struct msix_entry *host_msix_entries;
        int guest_irq;
        struct kvm_guest_msix_entry *guest_msix_entries;
        unsigned long irq_requested_type;
        int irq_source_id;
        int flags;
        struct pci_dev *dev;
        struct kvm *kvm;
        spinlock_t assigned_dev_lock;
};

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY       0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
                        unsigned long npages);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      gfn_t base_gfn,
                                      unsigned long npages)
{
        return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
        return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
                struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
                struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
        account_system_vtime(current);
        current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
        account_system_vtime(current);
        current->flags &= ~PF_VCPU;
}
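
/*
 * Illustrative sketch (assumed shape of the arch run loop, not part of
 * this header): guest execution is bracketed by these helpers so that
 * time spent in the guest is accounted to the vcpu task:
 *
 *	kvm_guest_enter();
 *	run_guest(vcpu);	// run_guest() stands in for the arch entry
 *	kvm_guest_exit();
 */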

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        return slot - kvm->memslots;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}
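
/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12),
 * gfn_to_gpa(0x100) == 0x100000 and pfn_to_hpa(0x2a) == 0x2a000.
 */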

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
        if (unlikely(vcpu->kvm->mmu_notifier_count))
                return 1;
        /*
         * Both reads happen under mmu_lock and both values are modified
         * under mmu_lock, so there is no need for an smp_rmb() in
         * between; without the lock, mmu_notifier_count would have to
         * be read before mmu_notifier_seq.  See the write side in
         * mmu_notifier_invalidate_range_end.
         */
        if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif
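
/*
 * Illustrative sketch (assumed fault-path pattern, not part of this
 * header): a page fault handler samples mmu_notifier_seq before pinning
 * the page, then rechecks under mmu_lock before installing the mapping:
 *
 *	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(vcpu->kvm, gfn);	// may sleep
 *
 *	spin_lock(&vcpu->kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;	// an invalidation raced; retry the fault
 *	// ... install the mapping ...
 *	spin_unlock(&vcpu->kvm->mmu_lock);
 */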

#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
void kvm_irqfd_release(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}
static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}
#endif

#endif /* __KVM_HOST_H */