linux/virt/kvm/kvm_main.c
   1/*
   2 * Kernel-based Virtual Machine driver for Linux
   3 *
   4 * This module enables machines with Intel VT-x extensions to run virtual
   5 * machines without emulation or binary translation.
   6 *
   7 * Copyright (C) 2006 Qumranet, Inc.
   8 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
   9 *
  10 * Authors:
  11 *   Avi Kivity   <avi@qumranet.com>
  12 *   Yaniv Kamay  <yaniv@qumranet.com>
  13 *
  14 * This work is licensed under the terms of the GNU GPL, version 2.  See
  15 * the COPYING file in the top-level directory.
  16 *
  17 */
  18
  19#include "iodev.h"
  20
  21#include <linux/kvm_host.h>
  22#include <linux/kvm.h>
  23#include <linux/module.h>
  24#include <linux/errno.h>
  25#include <linux/percpu.h>
  26#include <linux/mm.h>
  27#include <linux/miscdevice.h>
  28#include <linux/vmalloc.h>
  29#include <linux/reboot.h>
  30#include <linux/debugfs.h>
  31#include <linux/highmem.h>
  32#include <linux/file.h>
  33#include <linux/syscore_ops.h>
  34#include <linux/cpu.h>
  35#include <linux/sched.h>
  36#include <linux/cpumask.h>
  37#include <linux/smp.h>
  38#include <linux/anon_inodes.h>
  39#include <linux/profile.h>
  40#include <linux/kvm_para.h>
  41#include <linux/pagemap.h>
  42#include <linux/mman.h>
  43#include <linux/swap.h>
  44#include <linux/bitops.h>
  45#include <linux/spinlock.h>
  46#include <linux/compat.h>
  47#include <linux/srcu.h>
  48#include <linux/hugetlb.h>
  49#include <linux/slab.h>
  50#include <linux/sort.h>
  51#include <linux/bsearch.h>
  52
  53#include <asm/processor.h>
  54#include <asm/io.h>
  55#include <asm/uaccess.h>
  56#include <asm/pgtable.h>
  57
  58#include "coalesced_mmio.h"
  59#include "async_pf.h"
  60
  61#define CREATE_TRACE_POINTS
  62#include <trace/events/kvm.h>
  63
  64MODULE_AUTHOR("Qumranet");
  65MODULE_LICENSE("GPL");
  66
  67/*
  68 * Ordering of locks:
  69 *
  70 *              kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  71 */
  72
  73DEFINE_SPINLOCK(kvm_lock);
  74static DEFINE_RAW_SPINLOCK(kvm_count_lock);
  75LIST_HEAD(vm_list);
  76
  77static cpumask_var_t cpus_hardware_enabled;
  78static int kvm_usage_count = 0;
  79static atomic_t hardware_enable_failed;
  80
  81struct kmem_cache *kvm_vcpu_cache;
  82EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
  83
  84static __read_mostly struct preempt_ops kvm_preempt_ops;
  85
  86struct dentry *kvm_debugfs_dir;
  87
  88static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
  89                           unsigned long arg);
  90#ifdef CONFIG_COMPAT
  91static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
  92                                  unsigned long arg);
  93#endif
  94static int hardware_enable_all(void);
  95static void hardware_disable_all(void);
  96
  97static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
  98static void update_memslots(struct kvm_memslots *slots,
  99                            struct kvm_memory_slot *new, u64 last_generation);
 100
 101static void kvm_release_pfn_dirty(pfn_t pfn);
 102static void mark_page_dirty_in_slot(struct kvm *kvm,
 103                                    struct kvm_memory_slot *memslot, gfn_t gfn);
 104
 105__visible bool kvm_rebooting;
 106EXPORT_SYMBOL_GPL(kvm_rebooting);
 107
 108static bool largepages_enabled = true;
 109
 110bool kvm_is_mmio_pfn(pfn_t pfn)
 111{
 112        if (pfn_valid(pfn))
 113                return PageReserved(pfn_to_page(pfn));
 114
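             /*
              * pfns without a valid struct page, and pages marked reserved, are
              * treated as MMIO/reserved memory and are never reference-counted
              * by KVM (see kvm_get_pfn() and kvm_release_pfn_clean() below).
              */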
 115        return true;
 116}
 117
 118/*
  119 * Switches to the specified vcpu, until a matching vcpu_put()
 120 */
 121int vcpu_load(struct kvm_vcpu *vcpu)
 122{
 123        int cpu;
 124
 125        if (mutex_lock_killable(&vcpu->mutex))
 126                return -EINTR;
 127        if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
 128                /* The thread running this VCPU changed. */
 129                struct pid *oldpid = vcpu->pid;
 130                struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
 131                rcu_assign_pointer(vcpu->pid, newpid);
 132                synchronize_rcu();
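                     /*
                      * Wait for existing RCU readers of vcpu->pid (e.g.
                      * kvm_vcpu_yield_to()) to finish before dropping the old
                      * struct pid below.
                      */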
 133                put_pid(oldpid);
 134        }
 135        cpu = get_cpu();
 136        preempt_notifier_register(&vcpu->preempt_notifier);
 137        kvm_arch_vcpu_load(vcpu, cpu);
 138        put_cpu();
 139        return 0;
 140}
 141
 142void vcpu_put(struct kvm_vcpu *vcpu)
 143{
 144        preempt_disable();
 145        kvm_arch_vcpu_put(vcpu);
 146        preempt_notifier_unregister(&vcpu->preempt_notifier);
 147        preempt_enable();
 148        mutex_unlock(&vcpu->mutex);
 149}
 150
 151static void ack_flush(void *_completed)
 152{
 153}
 154
 155static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 156{
 157        int i, cpu, me;
 158        cpumask_var_t cpus;
 159        bool called = true;
 160        struct kvm_vcpu *vcpu;
 161
 162        zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 163
 164        me = get_cpu();
 165        kvm_for_each_vcpu(i, vcpu, kvm) {
 166                kvm_make_request(req, vcpu);
 167                cpu = vcpu->cpu;
 168
 169                /* Set ->requests bit before we read ->mode */
 170                smp_mb();
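                     /*
                      * Either the target vCPU observes the new request bit before
                      * it enters the guest, or we see vcpu->mode != OUTSIDE_GUEST_MODE
                      * below and kick it with an IPI.
                      */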
 171
 172                if (cpus != NULL && cpu != -1 && cpu != me &&
 173                      kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
 174                        cpumask_set_cpu(cpu, cpus);
 175        }
 176        if (unlikely(cpus == NULL))
 177                smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
 178        else if (!cpumask_empty(cpus))
 179                smp_call_function_many(cpus, ack_flush, NULL, 1);
 180        else
 181                called = false;
 182        put_cpu();
 183        free_cpumask_var(cpus);
 184        return called;
 185}
 186
 187void kvm_flush_remote_tlbs(struct kvm *kvm)
 188{
 189        if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 190                ++kvm->stat.remote_tlb_flush;
 191        kvm->tlbs_dirty = false;
 192}
 193EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 194
 195void kvm_reload_remote_mmus(struct kvm *kvm)
 196{
 197        make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
 198}
 199
 200void kvm_make_mclock_inprogress_request(struct kvm *kvm)
 201{
 202        make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
 203}
 204
 205void kvm_make_scan_ioapic_request(struct kvm *kvm)
 206{
 207        make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
 208}
 209
 210int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 211{
 212        struct page *page;
 213        int r;
 214
 215        mutex_init(&vcpu->mutex);
 216        vcpu->cpu = -1;
 217        vcpu->kvm = kvm;
 218        vcpu->vcpu_id = id;
 219        vcpu->pid = NULL;
 220        init_waitqueue_head(&vcpu->wq);
 221        kvm_async_pf_vcpu_init(vcpu);
 222
 223        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 224        if (!page) {
 225                r = -ENOMEM;
 226                goto fail;
 227        }
 228        vcpu->run = page_address(page);
 229
 230        kvm_vcpu_set_in_spin_loop(vcpu, false);
 231        kvm_vcpu_set_dy_eligible(vcpu, false);
 232        vcpu->preempted = false;
 233
 234        r = kvm_arch_vcpu_init(vcpu);
 235        if (r < 0)
 236                goto fail_free_run;
 237        return 0;
 238
 239fail_free_run:
 240        free_page((unsigned long)vcpu->run);
 241fail:
 242        return r;
 243}
 244EXPORT_SYMBOL_GPL(kvm_vcpu_init);
 245
 246void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
 247{
 248        put_pid(vcpu->pid);
 249        kvm_arch_vcpu_uninit(vcpu);
 250        free_page((unsigned long)vcpu->run);
 251}
 252EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
 253
 254#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 255static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
 256{
 257        return container_of(mn, struct kvm, mmu_notifier);
 258}
 259
 260static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
 261                                             struct mm_struct *mm,
 262                                             unsigned long address)
 263{
 264        struct kvm *kvm = mmu_notifier_to_kvm(mn);
 265        int need_tlb_flush, idx;
 266
 267        /*
 268         * When ->invalidate_page runs, the linux pte has been zapped
 269         * already but the page is still allocated until
 270         * ->invalidate_page returns. So if we increase the sequence
 271         * here the kvm page fault will notice if the spte can't be
 272         * established because the page is going to be freed. If
 273         * instead the kvm page fault establishes the spte before
 274         * ->invalidate_page runs, kvm_unmap_hva will release it
 275         * before returning.
 276         *
  277         * The sequence increase only needs to be seen at spin_unlock
 278         * time, and not at spin_lock time.
 279         *
 280         * Increasing the sequence after the spin_unlock would be
 281         * unsafe because the kvm page fault could then establish the
 282         * pte after kvm_unmap_hva returned, without noticing the page
 283         * is going to be freed.
 284         */
 285        idx = srcu_read_lock(&kvm->srcu);
 286        spin_lock(&kvm->mmu_lock);
 287
 288        kvm->mmu_notifier_seq++;
 289        need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
  290        /* we have to flush the TLB before the pages can be freed */
 291        if (need_tlb_flush)
 292                kvm_flush_remote_tlbs(kvm);
 293
 294        spin_unlock(&kvm->mmu_lock);
 295        srcu_read_unlock(&kvm->srcu, idx);
 296}
 297
 298static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 299                                        struct mm_struct *mm,
 300                                        unsigned long address,
 301                                        pte_t pte)
 302{
 303        struct kvm *kvm = mmu_notifier_to_kvm(mn);
 304        int idx;
 305
 306        idx = srcu_read_lock(&kvm->srcu);
 307        spin_lock(&kvm->mmu_lock);
 308        kvm->mmu_notifier_seq++;
 309        kvm_set_spte_hva(kvm, address, pte);
 310        spin_unlock(&kvm->mmu_lock);
 311        srcu_read_unlock(&kvm->srcu, idx);
 312}
 313
 314static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 315                                                    struct mm_struct *mm,
 316                                                    unsigned long start,
 317                                                    unsigned long end)
 318{
 319        struct kvm *kvm = mmu_notifier_to_kvm(mn);
 320        int need_tlb_flush = 0, idx;
 321
 322        idx = srcu_read_lock(&kvm->srcu);
 323        spin_lock(&kvm->mmu_lock);
 324        /*
 325         * The count increase must become visible at unlock time as no
  326         * spte can be established without taking the mmu_lock, and the
  327         * count is also read inside the mmu_lock critical section.
 328         */
 329        kvm->mmu_notifier_count++;
 330        need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
 331        need_tlb_flush |= kvm->tlbs_dirty;
  332        /* we have to flush the TLB before the pages can be freed */
 333        if (need_tlb_flush)
 334                kvm_flush_remote_tlbs(kvm);
 335
 336        spin_unlock(&kvm->mmu_lock);
 337        srcu_read_unlock(&kvm->srcu, idx);
 338}
 339
 340static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 341                                                  struct mm_struct *mm,
 342                                                  unsigned long start,
 343                                                  unsigned long end)
 344{
 345        struct kvm *kvm = mmu_notifier_to_kvm(mn);
 346
 347        spin_lock(&kvm->mmu_lock);
 348        /*
 349         * This sequence increase will notify the kvm page fault that
 350         * the page that is going to be mapped in the spte could have
 351         * been freed.
 352         */
 353        kvm->mmu_notifier_seq++;
 354        smp_wmb();
 355        /*
 356         * The above sequence increase must be visible before the
 357         * below count decrease, which is ensured by the smp_wmb above
 358         * in conjunction with the smp_rmb in mmu_notifier_retry().
 359         */
 360        kvm->mmu_notifier_count--;
 361        spin_unlock(&kvm->mmu_lock);
 362
 363        BUG_ON(kvm->mmu_notifier_count < 0);
 364}
 365
 366static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 367                                              struct mm_struct *mm,
 368                                              unsigned long address)
 369{
 370        struct kvm *kvm = mmu_notifier_to_kvm(mn);
 371        int young, idx;
 372
 373        idx = srcu_read_lock(&kvm->srcu);
 374        spin_lock(&kvm->mmu_lock);
 375
 376        young = kvm_age_hva(kvm, address);
 377        if (young)
 378                kvm_flush_remote_tlbs(kvm);
 379
 380        spin_unlock(&kvm->mmu_lock);
 381        srcu_read_unlock(&kvm->srcu, idx);
 382
 383        return young;
 384}
 385
 386static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
 387                                       struct mm_struct *mm,
 388                                       unsigned long address)
 389{
 390        struct kvm *kvm = mmu_notifier_to_kvm(mn);
 391        int young, idx;
 392
 393        idx = srcu_read_lock(&kvm->srcu);
 394        spin_lock(&kvm->mmu_lock);
 395        young = kvm_test_age_hva(kvm, address);
 396        spin_unlock(&kvm->mmu_lock);
 397        srcu_read_unlock(&kvm->srcu, idx);
 398
 399        return young;
 400}
 401
 402static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 403                                     struct mm_struct *mm)
 404{
 405        struct kvm *kvm = mmu_notifier_to_kvm(mn);
 406        int idx;
 407
 408        idx = srcu_read_lock(&kvm->srcu);
 409        kvm_arch_flush_shadow_all(kvm);
 410        srcu_read_unlock(&kvm->srcu, idx);
 411}
 412
 413static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
 414        .invalidate_page        = kvm_mmu_notifier_invalidate_page,
 415        .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
 416        .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
 417        .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
 418        .test_young             = kvm_mmu_notifier_test_young,
 419        .change_pte             = kvm_mmu_notifier_change_pte,
 420        .release                = kvm_mmu_notifier_release,
 421};
 422
 423static int kvm_init_mmu_notifier(struct kvm *kvm)
 424{
 425        kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
 426        return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
 427}
 428
 429#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
 430
 431static int kvm_init_mmu_notifier(struct kvm *kvm)
 432{
 433        return 0;
 434}
 435
 436#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
 437
 438static void kvm_init_memslots_id(struct kvm *kvm)
 439{
 440        int i;
 441        struct kvm_memslots *slots = kvm->memslots;
 442
 443        for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
 444                slots->id_to_index[i] = slots->memslots[i].id = i;
 445}
 446
 447static struct kvm *kvm_create_vm(unsigned long type)
 448{
 449        int r, i;
 450        struct kvm *kvm = kvm_arch_alloc_vm();
 451
 452        if (!kvm)
 453                return ERR_PTR(-ENOMEM);
 454
 455        r = kvm_arch_init_vm(kvm, type);
 456        if (r)
 457                goto out_err_nodisable;
 458
 459        r = hardware_enable_all();
 460        if (r)
 461                goto out_err_nodisable;
 462
 463#ifdef CONFIG_HAVE_KVM_IRQCHIP
 464        INIT_HLIST_HEAD(&kvm->mask_notifier_list);
 465        INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
 466#endif
 467
 468        BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
 469
 470        r = -ENOMEM;
 471        kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
 472        if (!kvm->memslots)
 473                goto out_err_nosrcu;
 474        kvm_init_memslots_id(kvm);
 475        if (init_srcu_struct(&kvm->srcu))
 476                goto out_err_nosrcu;
 477        for (i = 0; i < KVM_NR_BUSES; i++) {
 478                kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
 479                                        GFP_KERNEL);
 480                if (!kvm->buses[i])
 481                        goto out_err;
 482        }
 483
 484        spin_lock_init(&kvm->mmu_lock);
 485        kvm->mm = current->mm;
 486        atomic_inc(&kvm->mm->mm_count);
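             /*
              * Pin the mm_struct itself (mm_count, not mm_users); the matching
              * mmdrop() is in kvm_destroy_vm().
              */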
 487        kvm_eventfd_init(kvm);
 488        mutex_init(&kvm->lock);
 489        mutex_init(&kvm->irq_lock);
 490        mutex_init(&kvm->slots_lock);
 491        atomic_set(&kvm->users_count, 1);
 492        INIT_LIST_HEAD(&kvm->devices);
 493
 494        r = kvm_init_mmu_notifier(kvm);
 495        if (r)
 496                goto out_err;
 497
 498        spin_lock(&kvm_lock);
 499        list_add(&kvm->vm_list, &vm_list);
 500        spin_unlock(&kvm_lock);
 501
 502        return kvm;
 503
 504out_err:
 505        cleanup_srcu_struct(&kvm->srcu);
 506out_err_nosrcu:
 507        hardware_disable_all();
 508out_err_nodisable:
 509        for (i = 0; i < KVM_NR_BUSES; i++)
 510                kfree(kvm->buses[i]);
 511        kfree(kvm->memslots);
 512        kvm_arch_free_vm(kvm);
 513        return ERR_PTR(r);
 514}
 515
 516/*
 517 * Avoid using vmalloc for a small buffer.
 518 * Should not be used when the size is statically known.
 519 */
 520void *kvm_kvzalloc(unsigned long size)
 521{
 522        if (size > PAGE_SIZE)
 523                return vzalloc(size);
 524        else
 525                return kzalloc(size, GFP_KERNEL);
 526}
 527
 528void kvm_kvfree(const void *addr)
 529{
 530        if (is_vmalloc_addr(addr))
 531                vfree(addr);
 532        else
 533                kfree(addr);
 534}
 535
 536static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
 537{
 538        if (!memslot->dirty_bitmap)
 539                return;
 540
 541        kvm_kvfree(memslot->dirty_bitmap);
 542        memslot->dirty_bitmap = NULL;
 543}
 544
 545/*
 546 * Free any memory in @free but not in @dont.
 547 */
 548static void kvm_free_physmem_slot(struct kvm *kvm, struct kvm_memory_slot *free,
 549                                  struct kvm_memory_slot *dont)
 550{
 551        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
 552                kvm_destroy_dirty_bitmap(free);
 553
 554        kvm_arch_free_memslot(kvm, free, dont);
 555
 556        free->npages = 0;
 557}
 558
 559static void kvm_free_physmem(struct kvm *kvm)
 560{
 561        struct kvm_memslots *slots = kvm->memslots;
 562        struct kvm_memory_slot *memslot;
 563
 564        kvm_for_each_memslot(memslot, slots)
 565                kvm_free_physmem_slot(kvm, memslot, NULL);
 566
 567        kfree(kvm->memslots);
 568}
 569
 570static void kvm_destroy_devices(struct kvm *kvm)
 571{
 572        struct list_head *node, *tmp;
 573
 574        list_for_each_safe(node, tmp, &kvm->devices) {
 575                struct kvm_device *dev =
 576                        list_entry(node, struct kvm_device, vm_node);
 577
 578                list_del(node);
 579                dev->ops->destroy(dev);
 580        }
 581}
 582
 583static void kvm_destroy_vm(struct kvm *kvm)
 584{
 585        int i;
 586        struct mm_struct *mm = kvm->mm;
 587
 588        kvm_arch_sync_events(kvm);
 589        spin_lock(&kvm_lock);
 590        list_del(&kvm->vm_list);
 591        spin_unlock(&kvm_lock);
 592        kvm_free_irq_routing(kvm);
 593        for (i = 0; i < KVM_NR_BUSES; i++)
 594                kvm_io_bus_destroy(kvm->buses[i]);
 595        kvm_coalesced_mmio_free(kvm);
 596#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 597        mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
 598#else
 599        kvm_arch_flush_shadow_all(kvm);
 600#endif
 601        kvm_arch_destroy_vm(kvm);
 602        kvm_destroy_devices(kvm);
 603        kvm_free_physmem(kvm);
 604        cleanup_srcu_struct(&kvm->srcu);
 605        kvm_arch_free_vm(kvm);
 606        hardware_disable_all();
 607        mmdrop(mm);
 608}
 609
 610void kvm_get_kvm(struct kvm *kvm)
 611{
 612        atomic_inc(&kvm->users_count);
 613}
 614EXPORT_SYMBOL_GPL(kvm_get_kvm);
 615
 616void kvm_put_kvm(struct kvm *kvm)
 617{
 618        if (atomic_dec_and_test(&kvm->users_count))
 619                kvm_destroy_vm(kvm);
 620}
 621EXPORT_SYMBOL_GPL(kvm_put_kvm);
 622
 623
 624static int kvm_vm_release(struct inode *inode, struct file *filp)
 625{
 626        struct kvm *kvm = filp->private_data;
 627
 628        kvm_irqfd_release(kvm);
 629
 630        kvm_put_kvm(kvm);
 631        return 0;
 632}
 633
 634/*
 635 * Allocation size is twice as large as the actual dirty bitmap size.
  636 * See x86's kvm_vm_ioctl_get_dirty_log() for why this is needed.
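       * (There the extra space is used as a temporary buffer while the dirty
       * bits are collected under mmu_lock.)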
 637 */
 638static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 639{
 640#ifndef CONFIG_S390
 641        unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
 642
 643        memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
 644        if (!memslot->dirty_bitmap)
 645                return -ENOMEM;
 646
 647#endif /* !CONFIG_S390 */
 648        return 0;
 649}
 650
 651static int cmp_memslot(const void *slot1, const void *slot2)
 652{
 653        struct kvm_memory_slot *s1, *s2;
 654
 655        s1 = (struct kvm_memory_slot *)slot1;
 656        s2 = (struct kvm_memory_slot *)slot2;
 657
 658        if (s1->npages < s2->npages)
 659                return 1;
 660        if (s1->npages > s2->npages)
 661                return -1;
 662
 663        return 0;
 664}
 665
 666/*
  667 * Sort the memslots based on size, so that the larger slots
  668 * will get a better fit.
 669 */
 670static void sort_memslots(struct kvm_memslots *slots)
 671{
 672        int i;
 673
 674        sort(slots->memslots, KVM_MEM_SLOTS_NUM,
 675              sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
 676
 677        for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
 678                slots->id_to_index[slots->memslots[i].id] = i;
 679}
 680
 681static void update_memslots(struct kvm_memslots *slots,
 682                            struct kvm_memory_slot *new,
 683                            u64 last_generation)
 684{
 685        if (new) {
 686                int id = new->id;
 687                struct kvm_memory_slot *old = id_to_memslot(slots, id);
 688                unsigned long npages = old->npages;
 689
 690                *old = *new;
 691                if (new->npages != npages)
 692                        sort_memslots(slots);
 693        }
 694
 695        slots->generation = last_generation + 1;
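             /*
              * Bumping the generation invalidates any gfn_to_hva_cache set up
              * against the old memslots; see kvm_gfn_to_hva_cache_init() and
              * kvm_write_guest_cached().
              */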
 696}
 697
 698static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
 699{
 700        u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
 701
 702#ifdef KVM_CAP_READONLY_MEM
 703        valid_flags |= KVM_MEM_READONLY;
 704#endif
 705
 706        if (mem->flags & ~valid_flags)
 707                return -EINVAL;
 708
 709        return 0;
 710}
 711
 712static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 713                struct kvm_memslots *slots, struct kvm_memory_slot *new)
 714{
 715        struct kvm_memslots *old_memslots = kvm->memslots;
 716
 717        update_memslots(slots, new, kvm->memslots->generation);
 718        rcu_assign_pointer(kvm->memslots, slots);
 719        synchronize_srcu_expedited(&kvm->srcu);
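             /*
              * After synchronize_srcu_expedited() returns, no srcu reader can
              * still be dereferencing the old memslots array, so the caller may
              * free or reuse it.
              */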
 720
 721        kvm_arch_memslots_updated(kvm);
 722
 723        return old_memslots;
 724}
 725
 726/*
 727 * Allocate some memory and give it an address in the guest physical address
 728 * space.
 729 *
 730 * Discontiguous memory is allowed, mostly for framebuffers.
 731 *
  732 * Must be called holding kvm->slots_lock for write.
 733 */
 734int __kvm_set_memory_region(struct kvm *kvm,
 735                            struct kvm_userspace_memory_region *mem)
 736{
 737        int r;
 738        gfn_t base_gfn;
 739        unsigned long npages;
 740        struct kvm_memory_slot *slot;
 741        struct kvm_memory_slot old, new;
 742        struct kvm_memslots *slots = NULL, *old_memslots;
 743        enum kvm_mr_change change;
 744
 745        r = check_memory_region_flags(mem);
 746        if (r)
 747                goto out;
 748
 749        r = -EINVAL;
 750        /* General sanity checks */
 751        if (mem->memory_size & (PAGE_SIZE - 1))
 752                goto out;
 753        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
 754                goto out;
 755        /* We can read the guest memory with __xxx_user() later on. */
 756        if ((mem->slot < KVM_USER_MEM_SLOTS) &&
 757            ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
 758             !access_ok(VERIFY_WRITE,
 759                        (void __user *)(unsigned long)mem->userspace_addr,
 760                        mem->memory_size)))
 761                goto out;
 762        if (mem->slot >= KVM_MEM_SLOTS_NUM)
 763                goto out;
 764        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
 765                goto out;
 766
 767        slot = id_to_memslot(kvm->memslots, mem->slot);
 768        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
 769        npages = mem->memory_size >> PAGE_SHIFT;
 770
 771        r = -EINVAL;
 772        if (npages > KVM_MEM_MAX_NR_PAGES)
 773                goto out;
 774
 775        if (!npages)
 776                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
 777
 778        new = old = *slot;
 779
 780        new.id = mem->slot;
 781        new.base_gfn = base_gfn;
 782        new.npages = npages;
 783        new.flags = mem->flags;
 784
 785        r = -EINVAL;
 786        if (npages) {
 787                if (!old.npages)
 788                        change = KVM_MR_CREATE;
 789                else { /* Modify an existing slot. */
 790                        if ((mem->userspace_addr != old.userspace_addr) ||
 791                            (npages != old.npages) ||
 792                            ((new.flags ^ old.flags) & KVM_MEM_READONLY))
 793                                goto out;
 794
 795                        if (base_gfn != old.base_gfn)
 796                                change = KVM_MR_MOVE;
 797                        else if (new.flags != old.flags)
 798                                change = KVM_MR_FLAGS_ONLY;
 799                        else { /* Nothing to change. */
 800                                r = 0;
 801                                goto out;
 802                        }
 803                }
 804        } else if (old.npages) {
 805                change = KVM_MR_DELETE;
 806        } else /* Modify a non-existent slot: disallowed. */
 807                goto out;
 808
 809        if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
 810                /* Check for overlaps */
 811                r = -EEXIST;
 812                kvm_for_each_memslot(slot, kvm->memslots) {
 813                        if ((slot->id >= KVM_USER_MEM_SLOTS) ||
 814                            (slot->id == mem->slot))
 815                                continue;
 816                        if (!((base_gfn + npages <= slot->base_gfn) ||
 817                              (base_gfn >= slot->base_gfn + slot->npages)))
 818                                goto out;
 819                }
 820        }
 821
 822        /* Free page dirty bitmap if unneeded */
 823        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
 824                new.dirty_bitmap = NULL;
 825
 826        r = -ENOMEM;
 827        if (change == KVM_MR_CREATE) {
 828                new.userspace_addr = mem->userspace_addr;
 829
 830                if (kvm_arch_create_memslot(kvm, &new, npages))
 831                        goto out_free;
 832        }
 833
 834        /* Allocate page dirty bitmap if needed */
 835        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
 836                if (kvm_create_dirty_bitmap(&new) < 0)
 837                        goto out_free;
 838        }
 839
 840        if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
 841                r = -ENOMEM;
 842                slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
 843                                GFP_KERNEL);
 844                if (!slots)
 845                        goto out_free;
 846                slot = id_to_memslot(slots, mem->slot);
 847                slot->flags |= KVM_MEMSLOT_INVALID;
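                     /*
                      * The invalid flag makes __gfn_to_hva_many() and friends fail
                      * for this slot, so no new translations can be set up once
                      * this copy is installed below.
                      */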
 848
 849                old_memslots = install_new_memslots(kvm, slots, NULL);
 850
 851                /* slot was deleted or moved, clear iommu mapping */
 852                kvm_iommu_unmap_pages(kvm, &old);
 853                /* From this point no new shadow pages pointing to a deleted,
 854                 * or moved, memslot will be created.
 855                 *
 856                 * validation of sp->gfn happens in:
 857                 *      - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
 858                 *      - kvm_is_visible_gfn (mmu_check_roots)
 859                 */
 860                kvm_arch_flush_shadow_memslot(kvm, slot);
 861                slots = old_memslots;
 862        }
 863
 864        r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
 865        if (r)
 866                goto out_slots;
 867
 868        r = -ENOMEM;
 869        /*
  870         * We can re-use the old_memslots from above; the only difference
  871         * from the currently installed memslots is the invalid flag, which
  872         * will get overwritten by update_memslots anyway.
 873         */
 874        if (!slots) {
 875                slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
 876                                GFP_KERNEL);
 877                if (!slots)
 878                        goto out_free;
 879        }
 880
 881        /* actual memory is freed via old in kvm_free_physmem_slot below */
 882        if (change == KVM_MR_DELETE) {
 883                new.dirty_bitmap = NULL;
 884                memset(&new.arch, 0, sizeof(new.arch));
 885        }
 886
 887        old_memslots = install_new_memslots(kvm, slots, &new);
 888
 889        kvm_arch_commit_memory_region(kvm, mem, &old, change);
 890
 891        kvm_free_physmem_slot(kvm, &old, &new);
 892        kfree(old_memslots);
 893
 894        /*
 895         * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
 896         * un-mapped and re-mapped if their base changes.  Since base change
 897         * unmapping is handled above with slot deletion, mapping alone is
 898         * needed here.  Anything else the iommu might care about for existing
 899         * slots (size changes, userspace addr changes and read-only flag
 900         * changes) is disallowed above, so any other attribute changes getting
 901         * here can be skipped.
 902         */
 903        if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
 904                r = kvm_iommu_map_pages(kvm, &new);
 905                return r;
 906        }
 907
 908        return 0;
 909
 910out_slots:
 911        kfree(slots);
 912out_free:
 913        kvm_free_physmem_slot(kvm, &new, &old);
 914out:
 915        return r;
 916}
 917EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
 918
 919int kvm_set_memory_region(struct kvm *kvm,
 920                          struct kvm_userspace_memory_region *mem)
 921{
 922        int r;
 923
 924        mutex_lock(&kvm->slots_lock);
 925        r = __kvm_set_memory_region(kvm, mem);
 926        mutex_unlock(&kvm->slots_lock);
 927        return r;
 928}
 929EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 930
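     /*
      * Handler for the KVM_SET_USER_MEMORY_REGION vm ioctl.  A rough sketch of
      * how userspace typically registers guest RAM with it (illustrative only,
      * error handling omitted; ram_size and host_mem must be page-aligned):
      *
      *     struct kvm_userspace_memory_region region = {
      *             .slot            = 0,
      *             .flags           = 0,          // or KVM_MEM_LOG_DIRTY_PAGES
      *             .guest_phys_addr = 0,
      *             .memory_size     = ram_size,
      *             .userspace_addr  = (__u64)host_mem,
      *     };
      *     ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
      */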
 931static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 932                                          struct kvm_userspace_memory_region *mem)
 933{
 934        if (mem->slot >= KVM_USER_MEM_SLOTS)
 935                return -EINVAL;
 936        return kvm_set_memory_region(kvm, mem);
 937}
 938
 939int kvm_get_dirty_log(struct kvm *kvm,
 940                        struct kvm_dirty_log *log, int *is_dirty)
 941{
 942        struct kvm_memory_slot *memslot;
 943        int r, i;
 944        unsigned long n;
 945        unsigned long any = 0;
 946
 947        r = -EINVAL;
 948        if (log->slot >= KVM_USER_MEM_SLOTS)
 949                goto out;
 950
 951        memslot = id_to_memslot(kvm->memslots, log->slot);
 952        r = -ENOENT;
 953        if (!memslot->dirty_bitmap)
 954                goto out;
 955
 956        n = kvm_dirty_bitmap_bytes(memslot);
 957
 958        for (i = 0; !any && i < n/sizeof(long); ++i)
 959                any = memslot->dirty_bitmap[i];
 960
 961        r = -EFAULT;
 962        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
 963                goto out;
 964
 965        if (any)
 966                *is_dirty = 1;
 967
 968        r = 0;
 969out:
 970        return r;
 971}
 972EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
 973
 974bool kvm_largepages_enabled(void)
 975{
 976        return largepages_enabled;
 977}
 978
 979void kvm_disable_largepages(void)
 980{
 981        largepages_enabled = false;
 982}
 983EXPORT_SYMBOL_GPL(kvm_disable_largepages);
 984
 985struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 986{
 987        return __gfn_to_memslot(kvm_memslots(kvm), gfn);
 988}
 989EXPORT_SYMBOL_GPL(gfn_to_memslot);
 990
 991int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 992{
 993        struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
 994
 995        if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
 996              memslot->flags & KVM_MEMSLOT_INVALID)
 997                return 0;
 998
 999        return 1;
1000}
1001EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
1002
1003unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
1004{
1005        struct vm_area_struct *vma;
1006        unsigned long addr, size;
1007
1008        size = PAGE_SIZE;
1009
1010        addr = gfn_to_hva(kvm, gfn);
1011        if (kvm_is_error_hva(addr))
1012                return PAGE_SIZE;
1013
1014        down_read(&current->mm->mmap_sem);
1015        vma = find_vma(current->mm, addr);
1016        if (!vma)
1017                goto out;
1018
1019        size = vma_kernel_pagesize(vma);
1020
1021out:
1022        up_read(&current->mm->mmap_sem);
1023
1024        return size;
1025}
1026
1027static bool memslot_is_readonly(struct kvm_memory_slot *slot)
1028{
1029        return slot->flags & KVM_MEM_READONLY;
1030}
1031
1032static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1033                                       gfn_t *nr_pages, bool write)
1034{
1035        if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
1036                return KVM_HVA_ERR_BAD;
1037
1038        if (memslot_is_readonly(slot) && write)
1039                return KVM_HVA_ERR_RO_BAD;
1040
1041        if (nr_pages)
1042                *nr_pages = slot->npages - (gfn - slot->base_gfn);
1043
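            /*
             * __gfn_to_hva_memslot() (include/linux/kvm_host.h) computes
             * slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE.
             */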
1044        return __gfn_to_hva_memslot(slot, gfn);
1045}
1046
1047static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1048                                     gfn_t *nr_pages)
1049{
1050        return __gfn_to_hva_many(slot, gfn, nr_pages, true);
1051}
1052
1053unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
1054                                        gfn_t gfn)
1055{
1056        return gfn_to_hva_many(slot, gfn, NULL);
1057}
1058EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
1059
1060unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
1061{
1062        return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
1063}
1064EXPORT_SYMBOL_GPL(gfn_to_hva);
1065
1066/*
1067 * If *writable is set to false, the hva returned by this function is only
1068 * allowed to be read.
1069 */
1070unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
1071{
1072        struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
1073        unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
1074
1075        if (!kvm_is_error_hva(hva) && writable)
1076                *writable = !memslot_is_readonly(slot);
1077
1078        return hva;
1079}
1080
1081static int kvm_read_hva(void *data, void __user *hva, int len)
1082{
1083        return __copy_from_user(data, hva, len);
1084}
1085
1086static int kvm_read_hva_atomic(void *data, void __user *hva, int len)
1087{
1088        return __copy_from_user_inatomic(data, hva, len);
1089}
1090
1091static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
1092        unsigned long start, int write, struct page **page)
1093{
1094        int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
1095
1096        if (write)
1097                flags |= FOLL_WRITE;
1098
1099        return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
1100}
1101
1102static inline int check_user_page_hwpoison(unsigned long addr)
1103{
1104        int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
1105
1106        rc = __get_user_pages(current, current->mm, addr, 1,
1107                              flags, NULL, NULL, NULL);
1108        return rc == -EHWPOISON;
1109}
1110
1111/*
1112 * The fast (atomic) path to get the writable pfn, which will be stored in
1113 * @pfn; returns true on success, false otherwise.
1114 */
1115static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
1116                            bool write_fault, bool *writable, pfn_t *pfn)
1117{
1118        struct page *page[1];
1119        int npages;
1120
1121        if (!(async || atomic))
1122                return false;
1123
1124        /*
1125         * Fast-pin a writable pfn only if this is a write fault request
1126         * or the caller allows a writable pfn to be mapped for a read
1127         * fault request.
1128         */
1129        if (!(write_fault || writable))
1130                return false;
1131
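            /*
             * Lockless fast GUP: this only succeeds if the page is already
             * present and writable, and it never sleeps; otherwise we fall
             * back to the slow path.
             */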
1132        npages = __get_user_pages_fast(addr, 1, 1, page);
1133        if (npages == 1) {
1134                *pfn = page_to_pfn(page[0]);
1135
1136                if (writable)
1137                        *writable = true;
1138                return true;
1139        }
1140
1141        return false;
1142}
1143
1144/*
1145 * The slow path to get the pfn of the specified host virtual address;
1146 * returns 1 on success, or -errno if an error is detected.
1147 */
1148static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
1149                           bool *writable, pfn_t *pfn)
1150{
1151        struct page *page[1];
1152        int npages = 0;
1153
1154        might_sleep();
1155
1156        if (writable)
1157                *writable = write_fault;
1158
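            /*
             * For async requests use get_user_page_nowait() (FOLL_NOWAIT) so we
             * do not block on swap-in or other IO here; the fault can then be
             * completed asynchronously by the caller.
             */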
1159        if (async) {
1160                down_read(&current->mm->mmap_sem);
1161                npages = get_user_page_nowait(current, current->mm,
1162                                              addr, write_fault, page);
1163                up_read(&current->mm->mmap_sem);
1164        } else
1165                npages = get_user_pages_fast(addr, 1, write_fault,
1166                                             page);
1167        if (npages != 1)
1168                return npages;
1169
1170        /* map read fault as writable if possible */
1171        if (unlikely(!write_fault) && writable) {
1172                struct page *wpage[1];
1173
1174                npages = __get_user_pages_fast(addr, 1, 1, wpage);
1175                if (npages == 1) {
1176                        *writable = true;
1177                        put_page(page[0]);
1178                        page[0] = wpage[0];
1179                }
1180
1181                npages = 1;
1182        }
1183        *pfn = page_to_pfn(page[0]);
1184        return npages;
1185}
1186
1187static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
1188{
1189        if (unlikely(!(vma->vm_flags & VM_READ)))
1190                return false;
1191
1192        if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
1193                return false;
1194
1195        return true;
1196}
1197
1198/*
1199 * Pin guest page in memory and return its pfn.
1200 * @addr: host virtual address which maps memory to the guest
1201 * @atomic: whether this is called from an atomic context (must not sleep)
1202 * @async: whether this function needs to wait for IO to complete if the
1203 *         host page is not in memory
1204 * @write_fault: whether we should get a writable host page
1205 * @writable: whether it is allowed to map a writable host page for !@write_fault
1206 *
1207 * The function will map a writable host page for these two cases:
1208 * 1): @write_fault = true
1209 * 2): @write_fault = false && @writable != NULL; @writable will then tell
1210 *     the caller whether the mapping is writable.
1211 */
1212static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
1213                        bool write_fault, bool *writable)
1214{
1215        struct vm_area_struct *vma;
1216        pfn_t pfn = 0;
1217        int npages;
1218
1219        /* we can do it either atomically or asynchronously, not both */
1220        BUG_ON(atomic && async);
1221
1222        if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
1223                return pfn;
1224
1225        if (atomic)
1226                return KVM_PFN_ERR_FAULT;
1227
1228        npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
1229        if (npages == 1)
1230                return pfn;
1231
1232        down_read(&current->mm->mmap_sem);
1233        if (npages == -EHWPOISON ||
1234              (!async && check_user_page_hwpoison(addr))) {
1235                pfn = KVM_PFN_ERR_HWPOISON;
1236                goto exit;
1237        }
1238
1239        vma = find_vma_intersection(current->mm, addr, addr + 1);
1240
1241        if (vma == NULL)
1242                pfn = KVM_PFN_ERR_FAULT;
1243        else if ((vma->vm_flags & VM_PFNMAP)) {
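                    /*
                     * VM_PFNMAP mappings (e.g. set up by remap_pfn_range()) have
                     * no struct page; derive the pfn from the vma's pgoff plus
                     * the page offset within the vma.
                     */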
1244                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
1245                        vma->vm_pgoff;
1246                BUG_ON(!kvm_is_mmio_pfn(pfn));
1247        } else {
1248                if (async && vma_is_valid(vma, write_fault))
1249                        *async = true;
1250                pfn = KVM_PFN_ERR_FAULT;
1251        }
1252exit:
1253        up_read(&current->mm->mmap_sem);
1254        return pfn;
1255}
1256
1257static pfn_t
1258__gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
1259                     bool *async, bool write_fault, bool *writable)
1260{
1261        unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
1262
1263        if (addr == KVM_HVA_ERR_RO_BAD)
1264                return KVM_PFN_ERR_RO_FAULT;
1265
1266        if (kvm_is_error_hva(addr))
1267                return KVM_PFN_NOSLOT;
1268
1269        /* Do not map writable pfn in the readonly memslot. */
1270        if (writable && memslot_is_readonly(slot)) {
1271                *writable = false;
1272                writable = NULL;
1273        }
1274
1275        return hva_to_pfn(addr, atomic, async, write_fault,
1276                          writable);
1277}
1278
1279static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
1280                          bool write_fault, bool *writable)
1281{
1282        struct kvm_memory_slot *slot;
1283
1284        if (async)
1285                *async = false;
1286
1287        slot = gfn_to_memslot(kvm, gfn);
1288
1289        return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault,
1290                                    writable);
1291}
1292
1293pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
1294{
1295        return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
1296}
1297EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
1298
1299pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
1300                       bool write_fault, bool *writable)
1301{
1302        return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
1303}
1304EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
1305
1306pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
1307{
1308        return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
1309}
1310EXPORT_SYMBOL_GPL(gfn_to_pfn);
1311
1312pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
1313                      bool *writable)
1314{
1315        return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
1316}
1317EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
1318
1319pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
1320{
1321        return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
1322}
1323
1324pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
1325{
1326        return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
1327}
1328EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
1329
1330int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
1331                                                                  int nr_pages)
1332{
1333        unsigned long addr;
1334        gfn_t entry;
1335
1336        addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
1337        if (kvm_is_error_hva(addr))
1338                return -1;
1339
1340        if (entry < nr_pages)
1341                return 0;
1342
1343        return __get_user_pages_fast(addr, nr_pages, 1, pages);
1344}
1345EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
1346
1347static struct page *kvm_pfn_to_page(pfn_t pfn)
1348{
1349        if (is_error_noslot_pfn(pfn))
1350                return KVM_ERR_PTR_BAD_PAGE;
1351
1352        if (kvm_is_mmio_pfn(pfn)) {
1353                WARN_ON(1);
1354                return KVM_ERR_PTR_BAD_PAGE;
1355        }
1356
1357        return pfn_to_page(pfn);
1358}
1359
1360struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1361{
1362        pfn_t pfn;
1363
1364        pfn = gfn_to_pfn(kvm, gfn);
1365
1366        return kvm_pfn_to_page(pfn);
1367}
1368
1369EXPORT_SYMBOL_GPL(gfn_to_page);
1370
1371void kvm_release_page_clean(struct page *page)
1372{
1373        WARN_ON(is_error_page(page));
1374
1375        kvm_release_pfn_clean(page_to_pfn(page));
1376}
1377EXPORT_SYMBOL_GPL(kvm_release_page_clean);
1378
1379void kvm_release_pfn_clean(pfn_t pfn)
1380{
1381        if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
1382                put_page(pfn_to_page(pfn));
1383}
1384EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
1385
1386void kvm_release_page_dirty(struct page *page)
1387{
1388        WARN_ON(is_error_page(page));
1389
1390        kvm_release_pfn_dirty(page_to_pfn(page));
1391}
1392EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
1393
1394static void kvm_release_pfn_dirty(pfn_t pfn)
1395{
1396        kvm_set_pfn_dirty(pfn);
1397        kvm_release_pfn_clean(pfn);
1398}
1399
1400void kvm_set_pfn_dirty(pfn_t pfn)
1401{
1402        if (!kvm_is_mmio_pfn(pfn)) {
1403                struct page *page = pfn_to_page(pfn);
1404                if (!PageReserved(page))
1405                        SetPageDirty(page);
1406        }
1407}
1408EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
1409
1410void kvm_set_pfn_accessed(pfn_t pfn)
1411{
1412        if (!kvm_is_mmio_pfn(pfn))
1413                mark_page_accessed(pfn_to_page(pfn));
1414}
1415EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
1416
1417void kvm_get_pfn(pfn_t pfn)
1418{
1419        if (!kvm_is_mmio_pfn(pfn))
1420                get_page(pfn_to_page(pfn));
1421}
1422EXPORT_SYMBOL_GPL(kvm_get_pfn);
1423
1424static int next_segment(unsigned long len, int offset)
1425{
1426        if (len > PAGE_SIZE - offset)
1427                return PAGE_SIZE - offset;
1428        else
1429                return len;
1430}
1431
1432int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1433                        int len)
1434{
1435        int r;
1436        unsigned long addr;
1437
1438        addr = gfn_to_hva_prot(kvm, gfn, NULL);
1439        if (kvm_is_error_hva(addr))
1440                return -EFAULT;
1441        r = kvm_read_hva(data, (void __user *)addr + offset, len);
1442        if (r)
1443                return -EFAULT;
1444        return 0;
1445}
1446EXPORT_SYMBOL_GPL(kvm_read_guest_page);
1447
1448int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
1449{
1450        gfn_t gfn = gpa >> PAGE_SHIFT;
1451        int seg;
1452        int offset = offset_in_page(gpa);
1453        int ret;
1454
1455        while ((seg = next_segment(len, offset)) != 0) {
1456                ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
1457                if (ret < 0)
1458                        return ret;
1459                offset = 0;
1460                len -= seg;
1461                data += seg;
1462                ++gfn;
1463        }
1464        return 0;
1465}
1466EXPORT_SYMBOL_GPL(kvm_read_guest);
1467
1468int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
1469                          unsigned long len)
1470{
1471        int r;
1472        unsigned long addr;
1473        gfn_t gfn = gpa >> PAGE_SHIFT;
1474        int offset = offset_in_page(gpa);
1475
1476        addr = gfn_to_hva_prot(kvm, gfn, NULL);
1477        if (kvm_is_error_hva(addr))
1478                return -EFAULT;
1479        pagefault_disable();
1480        r = kvm_read_hva_atomic(data, (void __user *)addr + offset, len);
1481        pagefault_enable();
1482        if (r)
1483                return -EFAULT;
1484        return 0;
1485}
1486EXPORT_SYMBOL(kvm_read_guest_atomic);
1487
1488int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1489                         int offset, int len)
1490{
1491        int r;
1492        unsigned long addr;
1493
1494        addr = gfn_to_hva(kvm, gfn);
1495        if (kvm_is_error_hva(addr))
1496                return -EFAULT;
1497        r = __copy_to_user((void __user *)addr + offset, data, len);
1498        if (r)
1499                return -EFAULT;
1500        mark_page_dirty(kvm, gfn);
1501        return 0;
1502}
1503EXPORT_SYMBOL_GPL(kvm_write_guest_page);
1504
1505int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1506                    unsigned long len)
1507{
1508        gfn_t gfn = gpa >> PAGE_SHIFT;
1509        int seg;
1510        int offset = offset_in_page(gpa);
1511        int ret;
1512
1513        while ((seg = next_segment(len, offset)) != 0) {
1514                ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
1515                if (ret < 0)
1516                        return ret;
1517                offset = 0;
1518                len -= seg;
1519                data += seg;
1520                ++gfn;
1521        }
1522        return 0;
1523}
1524
1525int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1526                              gpa_t gpa, unsigned long len)
1527{
1528        struct kvm_memslots *slots = kvm_memslots(kvm);
1529        int offset = offset_in_page(gpa);
1530        gfn_t start_gfn = gpa >> PAGE_SHIFT;
1531        gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
1532        gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
1533        gfn_t nr_pages_avail;
1534
1535        ghc->gpa = gpa;
1536        ghc->generation = slots->generation;
1537        ghc->len = len;
1538        ghc->memslot = gfn_to_memslot(kvm, start_gfn);
1539        ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
1540        if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
1541                ghc->hva += offset;
1542        } else {
1543                /*
1544                 * If the requested region crosses memslot boundaries, we still
1545                 * verify that the entire region is valid here.
1546                 */
1547                while (start_gfn <= end_gfn) {
1548                        ghc->memslot = gfn_to_memslot(kvm, start_gfn);
1549                        ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
1550                                                   &nr_pages_avail);
1551                        if (kvm_is_error_hva(ghc->hva))
1552                                return -EFAULT;
1553                        start_gfn += nr_pages_avail;
1554                }
1555                /* Use the slow path for cross page reads and writes. */
1556                ghc->memslot = NULL;
1557        }
1558        return 0;
1559}
1560EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
1561
1562int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1563                           void *data, unsigned long len)
1564{
1565        struct kvm_memslots *slots = kvm_memslots(kvm);
1566        int r;
1567
1568        BUG_ON(len > ghc->len);
1569
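            /*
             * install_new_memslots() bumps slots->generation, so if the memslots
             * changed since this cache was initialised, re-resolve the cached
             * hva before using it.
             */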
1570        if (slots->generation != ghc->generation)
1571                kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
1572
1573        if (unlikely(!ghc->memslot))
1574                return kvm_write_guest(kvm, ghc->gpa, data, len);
1575
1576        if (kvm_is_error_hva(ghc->hva))
1577                return -EFAULT;
1578
1579        r = __copy_to_user((void __user *)ghc->hva, data, len);
1580        if (r)
1581                return -EFAULT;
1582        mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
1583
1584        return 0;
1585}
1586EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
1587
1588int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1589                           void *data, unsigned long len)
1590{
1591        struct kvm_memslots *slots = kvm_memslots(kvm);
1592        int r;
1593
1594        BUG_ON(len > ghc->len);
1595
1596        if (slots->generation != ghc->generation)
1597                kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
1598
1599        if (unlikely(!ghc->memslot))
1600                return kvm_read_guest(kvm, ghc->gpa, data, len);
1601
1602        if (kvm_is_error_hva(ghc->hva))
1603                return -EFAULT;
1604
1605        r = __copy_from_user(data, (void __user *)ghc->hva, len);
1606        if (r)
1607                return -EFAULT;
1608
1609        return 0;
1610}
1611EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
1612
1613int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
1614{
1615        const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
1616
1617        return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
1618}
1619EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
1620
1621int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
1622{
1623        gfn_t gfn = gpa >> PAGE_SHIFT;
1624        int seg;
1625        int offset = offset_in_page(gpa);
1626        int ret;
1627
1628        while ((seg = next_segment(len, offset)) != 0) {
1629                ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
1630                if (ret < 0)
1631                        return ret;
1632                offset = 0;
1633                len -= seg;
1634                ++gfn;
1635        }
1636        return 0;
1637}
1638EXPORT_SYMBOL_GPL(kvm_clear_guest);
1639
1640static void mark_page_dirty_in_slot(struct kvm *kvm,
1641                                    struct kvm_memory_slot *memslot,
1642                                    gfn_t gfn)
1643{
1644        if (memslot && memslot->dirty_bitmap) {
1645                unsigned long rel_gfn = gfn - memslot->base_gfn;
1646
1647                set_bit_le(rel_gfn, memslot->dirty_bitmap);
1648        }
1649}
1650
1651void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
1652{
1653        struct kvm_memory_slot *memslot;
1654
1655        memslot = gfn_to_memslot(kvm, gfn);
1656        mark_page_dirty_in_slot(kvm, memslot, gfn);
1657}
1658EXPORT_SYMBOL_GPL(mark_page_dirty);
1659
1660/*
1661 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
1662 */
1663void kvm_vcpu_block(struct kvm_vcpu *vcpu)
1664{
1665        DEFINE_WAIT(wait);
1666
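            /*
             * Standard wait-loop idiom: (re)register on the wait queue, then
             * re-check the wakeup conditions before calling schedule(), so that
             * wakeups racing with the checks are not lost.
             */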
1667        for (;;) {
1668                prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
1669
1670                if (kvm_arch_vcpu_runnable(vcpu)) {
1671                        kvm_make_request(KVM_REQ_UNHALT, vcpu);
1672                        break;
1673                }
1674                if (kvm_cpu_has_pending_timer(vcpu))
1675                        break;
1676                if (signal_pending(current))
1677                        break;
1678
1679                schedule();
1680        }
1681
1682        finish_wait(&vcpu->wq, &wait);
1683}
1684EXPORT_SYMBOL_GPL(kvm_vcpu_block);
1685
1686#ifndef CONFIG_S390
1687/*
1688 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
1689 */
1690void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
1691{
1692        int me;
1693        int cpu = vcpu->cpu;
1694        wait_queue_head_t *wqp;
1695
1696        wqp = kvm_arch_vcpu_wq(vcpu);
1697        if (waitqueue_active(wqp)) {
1698                wake_up_interruptible(wqp);
1699                ++vcpu->stat.halt_wakeup;
1700        }
1701
1702        me = get_cpu();
1703        if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
1704                if (kvm_arch_vcpu_should_kick(vcpu))
1705                        smp_send_reschedule(cpu);
1706        put_cpu();
1707}
1708EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
1709#endif /* !CONFIG_S390 */
1710
1711bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
1712{
1713        struct pid *pid;
1714        struct task_struct *task = NULL;
1715        bool ret = false;
1716
1717        rcu_read_lock();
1718        pid = rcu_dereference(target->pid);
1719        if (pid)
1720                task = get_pid_task(pid, PIDTYPE_PID);
1721        rcu_read_unlock();
1722        if (!task)
1723                return ret;
1724        if (task->flags & PF_VCPU) {
1725                put_task_struct(task);
1726                return ret;
1727        }
1728        ret = yield_to(task, 1);
1729        put_task_struct(task);
1730
1731        return ret;
1732}
1733EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
1734
1735/*
1736 * Helper that checks whether a VCPU is eligible for directed yield.
1737 * The most eligible candidate to yield to is decided by these heuristics:
1738 *
1739 *  (a) A VCPU which has not done a PLE exit or had CPU relax intercepted
1740 *  recently (a preempted lock holder), indicated by @in_spin_loop.
1741 *  Set at the beginning and cleared at the end of the interception/PLE handler.
1742 *
1743 *  (b) A VCPU which has done a PLE exit/had CPU relax intercepted but did not
1744 *  get a chance last time (it has most likely become eligible now, since we
1745 *  probably yielded to the lock holder in the last iteration).  This is done
1746 *  by toggling @dy_eligible each time a VCPU is checked for eligibility.
1747 *
1748 *  Yielding to a recently PLE-exited/CPU relax intercepted VCPU before
1749 *  yielding to the preempted lock holder could result in wrong VCPU selection
1750 *  and CPU burning.  Giving priority to a potential lock holder increases
1751 *  lock progress.
1752 *
1753 *  Since the algorithm is based on heuristics, accessing another VCPU's data
1754 *  without locking does no harm.  It may result in trying to yield to the
1755 *  same VCPU, failing, and continuing with the next VCPU, and so on.
1756 */
1757static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
1758{
1759#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
1760        bool eligible;
1761
1762        eligible = !vcpu->spin_loop.in_spin_loop ||
1763                        (vcpu->spin_loop.in_spin_loop &&
1764                         vcpu->spin_loop.dy_eligible);
1765
1766        if (vcpu->spin_loop.in_spin_loop)
1767                kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
1768
1769        return eligible;
1770#else
1771        return true;
1772#endif
1773}
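/*
 * Concretely: a VCPU that is inside the PLE/CPU-relax handler (@in_spin_loop
 * set) and is considered repeatedly by kvm_vcpu_on_spin() alternates between
 * ineligible and eligible on successive checks, because each check toggles
 * @dy_eligible.  A VCPU that is not spinning is always reported as eligible.
 */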
1774
1775void kvm_vcpu_on_spin(struct kvm_vcpu *me)
1776{
1777        struct kvm *kvm = me->kvm;
1778        struct kvm_vcpu *vcpu;
1779        int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
1780        int yielded = 0;
1781        int try = 3;
1782        int pass;
1783        int i;
1784
1785        kvm_vcpu_set_in_spin_loop(me, true);
1786        /*
1787         * We boost the priority of a VCPU that is runnable but not
1788         * currently running, because it got preempted by something
1789         * else and called schedule in __vcpu_run.  Hopefully that
1790         * VCPU is holding the lock that we need and will release it.
1791         * We approximate round-robin by starting at the last boosted VCPU.
1792         */
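        /*
         * Scan order example: with four online VCPUs and last_boosted_vcpu
         * equal to 2, pass 0 considers VCPU 3 and pass 1 considers VCPUs 0,
         * 1 and 2, so every VCPU is visited at most once per invocation.
         */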
1793        for (pass = 0; pass < 2 && !yielded && try; pass++) {
1794                kvm_for_each_vcpu(i, vcpu, kvm) {
1795                        if (!pass && i <= last_boosted_vcpu) {
1796                                i = last_boosted_vcpu;
1797                                continue;
1798                        } else if (pass && i > last_boosted_vcpu)
1799                                break;
1800                        if (!ACCESS_ONCE(vcpu->preempted))
1801                                continue;
1802                        if (vcpu == me)
1803                                continue;
1804                        if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
1805                                continue;
1806                        if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
1807                                continue;
1808
1809                        yielded = kvm_vcpu_yield_to(vcpu);
1810                        if (yielded > 0) {
1811                                kvm->last_boosted_vcpu = i;
1812                                break;
1813                        } else if (yielded < 0) {
1814                                try--;
1815                                if (!try)
1816                                        break;
1817                        }
1818                }
1819        }
1820        kvm_vcpu_set_in_spin_loop(me, false);
1821
1822        /* Ensure vcpu is not eligible during next spinloop */
1823        kvm_vcpu_set_dy_eligible(me, false);
1824}
1825EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
1826
1827static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1828{
1829        struct kvm_vcpu *vcpu = vma->vm_file->private_data;
1830        struct page *page;
1831
1832        if (vmf->pgoff == 0)
1833                page = virt_to_page(vcpu->run);
1834#ifdef CONFIG_X86
1835        else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
1836                page = virt_to_page(vcpu->arch.pio_data);
1837#endif
1838#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1839        else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
1840                page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
1841#endif
1842        else
1843                return kvm_arch_vcpu_fault(vcpu, vmf);
1844        get_page(page);
1845        vmf->page = page;
1846        return 0;
1847}
1848
1849static const struct vm_operations_struct kvm_vcpu_vm_ops = {
1850        .fault = kvm_vcpu_fault,
1851};
1852
1853static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
1854{
1855        vma->vm_ops = &kvm_vcpu_vm_ops;
1856        return 0;
1857}
1858
1859static int kvm_vcpu_release(struct inode *inode, struct file *filp)
1860{
1861        struct kvm_vcpu *vcpu = filp->private_data;
1862
1863        kvm_put_kvm(vcpu->kvm);
1864        return 0;
1865}
1866
1867static struct file_operations kvm_vcpu_fops = {
1868        .release        = kvm_vcpu_release,
1869        .unlocked_ioctl = kvm_vcpu_ioctl,
1870#ifdef CONFIG_COMPAT
1871        .compat_ioctl   = kvm_vcpu_compat_ioctl,
1872#endif
1873        .mmap           = kvm_vcpu_mmap,
1874        .llseek         = noop_llseek,
1875};
1876
1877/*
1878 * Allocates a file descriptor for the vcpu, backed by an anonymous inode.
1879 */
1880static int create_vcpu_fd(struct kvm_vcpu *vcpu)
1881{
1882        return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
1883}
1884
1885/*
1886 * Creates some virtual cpus.  Good luck creating more than one.
1887 */
1888static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
1889{
1890        int r;
1891        struct kvm_vcpu *vcpu, *v;
1892
1893        if (id >= KVM_MAX_VCPUS)
1894                return -EINVAL;
1895
1896        vcpu = kvm_arch_vcpu_create(kvm, id);
1897        if (IS_ERR(vcpu))
1898                return PTR_ERR(vcpu);
1899
1900        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
1901
1902        r = kvm_arch_vcpu_setup(vcpu);
1903        if (r)
1904                goto vcpu_destroy;
1905
1906        mutex_lock(&kvm->lock);
1907        if (!kvm_vcpu_compatible(vcpu)) {
1908                r = -EINVAL;
1909                goto unlock_vcpu_destroy;
1910        }
1911        if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
1912                r = -EINVAL;
1913                goto unlock_vcpu_destroy;
1914        }
1915
1916        kvm_for_each_vcpu(r, v, kvm)
1917                if (v->vcpu_id == id) {
1918                        r = -EEXIST;
1919                        goto unlock_vcpu_destroy;
1920                }
1921
1922        BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
1923
1924        /* Now it's all set up, let userspace reach it */
1925        kvm_get_kvm(kvm);
1926        r = create_vcpu_fd(vcpu);
1927        if (r < 0) {
1928                kvm_put_kvm(kvm);
1929                goto unlock_vcpu_destroy;
1930        }
1931
1932        kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
1933        smp_wmb();
1934        atomic_inc(&kvm->online_vcpus);
1935
1936        mutex_unlock(&kvm->lock);
1937        kvm_arch_vcpu_postcreate(vcpu);
1938        return r;
1939
1940unlock_vcpu_destroy:
1941        mutex_unlock(&kvm->lock);
1942vcpu_destroy:
1943        kvm_arch_vcpu_destroy(vcpu);
1944        return r;
1945}
1946
1947static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
1948{
1949        if (sigset) {
1950                sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
1951                vcpu->sigset_active = 1;
1952                vcpu->sigset = *sigset;
1953        } else
1954                vcpu->sigset_active = 0;
1955        return 0;
1956}
1957
1958static long kvm_vcpu_ioctl(struct file *filp,
1959                           unsigned int ioctl, unsigned long arg)
1960{
1961        struct kvm_vcpu *vcpu = filp->private_data;
1962        void __user *argp = (void __user *)arg;
1963        int r;
1964        struct kvm_fpu *fpu = NULL;
1965        struct kvm_sregs *kvm_sregs = NULL;
1966
1967        if (vcpu->kvm->mm != current->mm)
1968                return -EIO;
1969
1970#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
1971        /*
1972         * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
1973         * so vcpu_load() would break it.
1974         */
1975        if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
1976                return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
1977#endif
1978
1979
1980        r = vcpu_load(vcpu);
1981        if (r)
1982                return r;
1983        switch (ioctl) {
1984        case KVM_RUN:
1985                r = -EINVAL;
1986                if (arg)
1987                        goto out;
1988                r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
1989                trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
1990                break;
1991        case KVM_GET_REGS: {
1992                struct kvm_regs *kvm_regs;
1993
1994                r = -ENOMEM;
1995                kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1996                if (!kvm_regs)
1997                        goto out;
1998                r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
1999                if (r)
2000                        goto out_free1;
2001                r = -EFAULT;
2002                if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
2003                        goto out_free1;
2004                r = 0;
2005out_free1:
2006                kfree(kvm_regs);
2007                break;
2008        }
2009        case KVM_SET_REGS: {
2010                struct kvm_regs *kvm_regs;
2011
2012                r = -ENOMEM;
2013                kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
2014                if (IS_ERR(kvm_regs)) {
2015                        r = PTR_ERR(kvm_regs);
2016                        goto out;
2017                }
2018                r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
2019                kfree(kvm_regs);
2020                break;
2021        }
2022        case KVM_GET_SREGS: {
2023                kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
2024                r = -ENOMEM;
2025                if (!kvm_sregs)
2026                        goto out;
2027                r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
2028                if (r)
2029                        goto out;
2030                r = -EFAULT;
2031                if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
2032                        goto out;
2033                r = 0;
2034                break;
2035        }
2036        case KVM_SET_SREGS: {
2037                kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
2038                if (IS_ERR(kvm_sregs)) {
2039                        r = PTR_ERR(kvm_sregs);
2040                        kvm_sregs = NULL;
2041                        goto out;
2042                }
2043                r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
2044                break;
2045        }
2046        case KVM_GET_MP_STATE: {
2047                struct kvm_mp_state mp_state;
2048
2049                r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
2050                if (r)
2051                        goto out;
2052                r = -EFAULT;
2053                if (copy_to_user(argp, &mp_state, sizeof mp_state))
2054                        goto out;
2055                r = 0;
2056                break;
2057        }
2058        case KVM_SET_MP_STATE: {
2059                struct kvm_mp_state mp_state;
2060
2061                r = -EFAULT;
2062                if (copy_from_user(&mp_state, argp, sizeof mp_state))
2063                        goto out;
2064                r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
2065                break;
2066        }
2067        case KVM_TRANSLATE: {
2068                struct kvm_translation tr;
2069
2070                r = -EFAULT;
2071                if (copy_from_user(&tr, argp, sizeof tr))
2072                        goto out;
2073                r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
2074                if (r)
2075                        goto out;
2076                r = -EFAULT;
2077                if (copy_to_user(argp, &tr, sizeof tr))
2078                        goto out;
2079                r = 0;
2080                break;
2081        }
2082        case KVM_SET_GUEST_DEBUG: {
2083                struct kvm_guest_debug dbg;
2084
2085                r = -EFAULT;
2086                if (copy_from_user(&dbg, argp, sizeof dbg))
2087                        goto out;
2088                r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
2089                break;
2090        }
2091        case KVM_SET_SIGNAL_MASK: {
2092                struct kvm_signal_mask __user *sigmask_arg = argp;
2093                struct kvm_signal_mask kvm_sigmask;
2094                sigset_t sigset, *p;
2095
2096                p = NULL;
2097                if (argp) {
2098                        r = -EFAULT;
2099                        if (copy_from_user(&kvm_sigmask, argp,
2100                                           sizeof kvm_sigmask))
2101                                goto out;
2102                        r = -EINVAL;
2103                        if (kvm_sigmask.len != sizeof sigset)
2104                                goto out;
2105                        r = -EFAULT;
2106                        if (copy_from_user(&sigset, sigmask_arg->sigset,
2107                                           sizeof sigset))
2108                                goto out;
2109                        p = &sigset;
2110                }
2111                r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
2112                break;
2113        }
2114        case KVM_GET_FPU: {
2115                fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
2116                r = -ENOMEM;
2117                if (!fpu)
2118                        goto out;
2119                r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
2120                if (r)
2121                        goto out;
2122                r = -EFAULT;
2123                if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
2124                        goto out;
2125                r = 0;
2126                break;
2127        }
2128        case KVM_SET_FPU: {
2129                fpu = memdup_user(argp, sizeof(*fpu));
2130                if (IS_ERR(fpu)) {
2131                        r = PTR_ERR(fpu);
2132                        fpu = NULL;
2133                        goto out;
2134                }
2135                r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
2136                break;
2137        }
2138        default:
2139                r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
2140        }
2141out:
2142        vcpu_put(vcpu);
2143        kfree(fpu);
2144        kfree(kvm_sregs);
2145        return r;
2146}
2147
2148#ifdef CONFIG_COMPAT
2149static long kvm_vcpu_compat_ioctl(struct file *filp,
2150                                  unsigned int ioctl, unsigned long arg)
2151{
2152        struct kvm_vcpu *vcpu = filp->private_data;
2153        void __user *argp = compat_ptr(arg);
2154        int r;
2155
2156        if (vcpu->kvm->mm != current->mm)
2157                return -EIO;
2158
2159        switch (ioctl) {
2160        case KVM_SET_SIGNAL_MASK: {
2161                struct kvm_signal_mask __user *sigmask_arg = argp;
2162                struct kvm_signal_mask kvm_sigmask;
2163                compat_sigset_t csigset;
2164                sigset_t sigset;
2165
2166                if (argp) {
2167                        r = -EFAULT;
2168                        if (copy_from_user(&kvm_sigmask, argp,
2169                                           sizeof kvm_sigmask))
2170                                goto out;
2171                        r = -EINVAL;
2172                        if (kvm_sigmask.len != sizeof csigset)
2173                                goto out;
2174                        r = -EFAULT;
2175                        if (copy_from_user(&csigset, sigmask_arg->sigset,
2176                                           sizeof csigset))
2177                                goto out;
2178                        sigset_from_compat(&sigset, &csigset);
2179                        r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
2180                } else
2181                        r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
2182                break;
2183        }
2184        default:
2185                r = kvm_vcpu_ioctl(filp, ioctl, arg);
2186        }
2187
2188out:
2189        return r;
2190}
2191#endif
2192
2193static int kvm_device_ioctl_attr(struct kvm_device *dev,
2194                                 int (*accessor)(struct kvm_device *dev,
2195                                                 struct kvm_device_attr *attr),
2196                                 unsigned long arg)
2197{
2198        struct kvm_device_attr attr;
2199
2200        if (!accessor)
2201                return -EPERM;
2202
2203        if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2204                return -EFAULT;
2205
2206        return accessor(dev, &attr);
2207}
2208
2209static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
2210                             unsigned long arg)
2211{
2212        struct kvm_device *dev = filp->private_data;
2213
2214        switch (ioctl) {
2215        case KVM_SET_DEVICE_ATTR:
2216                return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
2217        case KVM_GET_DEVICE_ATTR:
2218                return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
2219        case KVM_HAS_DEVICE_ATTR:
2220                return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
2221        default:
2222                if (dev->ops->ioctl)
2223                        return dev->ops->ioctl(dev, ioctl, arg);
2224
2225                return -ENOTTY;
2226        }
2227}
2228
2229static int kvm_device_release(struct inode *inode, struct file *filp)
2230{
2231        struct kvm_device *dev = filp->private_data;
2232        struct kvm *kvm = dev->kvm;
2233
2234        kvm_put_kvm(kvm);
2235        return 0;
2236}
2237
2238static const struct file_operations kvm_device_fops = {
2239        .unlocked_ioctl = kvm_device_ioctl,
2240#ifdef CONFIG_COMPAT
2241        .compat_ioctl = kvm_device_ioctl,
2242#endif
2243        .release = kvm_device_release,
2244};
2245
2246struct kvm_device *kvm_device_from_filp(struct file *filp)
2247{
2248        if (filp->f_op != &kvm_device_fops)
2249                return NULL;
2250
2251        return filp->private_data;
2252}
2253
2254static int kvm_ioctl_create_device(struct kvm *kvm,
2255                                   struct kvm_create_device *cd)
2256{
2257        struct kvm_device_ops *ops = NULL;
2258        struct kvm_device *dev;
2259        bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
2260        int ret;
2261
2262        switch (cd->type) {
2263#ifdef CONFIG_KVM_MPIC
2264        case KVM_DEV_TYPE_FSL_MPIC_20:
2265        case KVM_DEV_TYPE_FSL_MPIC_42:
2266                ops = &kvm_mpic_ops;
2267                break;
2268#endif
2269#ifdef CONFIG_KVM_XICS
2270        case KVM_DEV_TYPE_XICS:
2271                ops = &kvm_xics_ops;
2272                break;
2273#endif
2274#ifdef CONFIG_KVM_VFIO
2275        case KVM_DEV_TYPE_VFIO:
2276                ops = &kvm_vfio_ops;
2277                break;
2278#endif
2279#ifdef CONFIG_KVM_ARM_VGIC
2280        case KVM_DEV_TYPE_ARM_VGIC_V2:
2281                ops = &kvm_arm_vgic_v2_ops;
2282                break;
2283#endif
2284#ifdef CONFIG_S390
2285        case KVM_DEV_TYPE_FLIC:
2286                ops = &kvm_flic_ops;
2287                break;
2288#endif
2289        default:
2290                return -ENODEV;
2291        }
2292
2293        if (test)
2294                return 0;
2295
2296        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2297        if (!dev)
2298                return -ENOMEM;
2299
2300        dev->ops = ops;
2301        dev->kvm = kvm;
2302
2303        ret = ops->create(dev, cd->type);
2304        if (ret < 0) {
2305                kfree(dev);
2306                return ret;
2307        }
2308
2309        ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
2310        if (ret < 0) {
2311                ops->destroy(dev);
2312                return ret;
2313        }
2314
2315        list_add(&dev->vm_node, &kvm->devices);
2316        kvm_get_kvm(kvm);
2317        cd->fd = ret;
2318        return 0;
2319}
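/*
 * From userspace, the device API is driven by KVM_CREATE_DEVICE on the VM fd,
 * followed by KVM_SET/GET/HAS_DEVICE_ATTR on the returned device fd.  A rough
 * sketch, where the VFIO type is just one of the types handled above and the
 * kvm_device_attr contents are entirely device specific:
 *
 *        struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *
 *        cd.flags = KVM_CREATE_DEVICE_TEST;       (probe: no fd is created)
 *        if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0) {
 *                cd.flags = 0;
 *                ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);  (cd.fd is now a device fd)
 *                ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 *        }
 */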
2320
2321static long kvm_vm_ioctl(struct file *filp,
2322                           unsigned int ioctl, unsigned long arg)
2323{
2324        struct kvm *kvm = filp->private_data;
2325        void __user *argp = (void __user *)arg;
2326        int r;
2327
2328        if (kvm->mm != current->mm)
2329                return -EIO;
2330        switch (ioctl) {
2331        case KVM_CREATE_VCPU:
2332                r = kvm_vm_ioctl_create_vcpu(kvm, arg);
2333                break;
2334        case KVM_SET_USER_MEMORY_REGION: {
2335                struct kvm_userspace_memory_region kvm_userspace_mem;
2336
2337                r = -EFAULT;
2338                if (copy_from_user(&kvm_userspace_mem, argp,
2339                                                sizeof kvm_userspace_mem))
2340                        goto out;
2341
2342                r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
2343                break;
2344        }
2345        case KVM_GET_DIRTY_LOG: {
2346                struct kvm_dirty_log log;
2347
2348                r = -EFAULT;
2349                if (copy_from_user(&log, argp, sizeof log))
2350                        goto out;
2351                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
2352                break;
2353        }
2354#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2355        case KVM_REGISTER_COALESCED_MMIO: {
2356                struct kvm_coalesced_mmio_zone zone;
2357                r = -EFAULT;
2358                if (copy_from_user(&zone, argp, sizeof zone))
2359                        goto out;
2360                r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
2361                break;
2362        }
2363        case KVM_UNREGISTER_COALESCED_MMIO: {
2364                struct kvm_coalesced_mmio_zone zone;
2365                r = -EFAULT;
2366                if (copy_from_user(&zone, argp, sizeof zone))
2367                        goto out;
2368                r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
2369                break;
2370        }
2371#endif
2372        case KVM_IRQFD: {
2373                struct kvm_irqfd data;
2374
2375                r = -EFAULT;
2376                if (copy_from_user(&data, argp, sizeof data))
2377                        goto out;
2378                r = kvm_irqfd(kvm, &data);
2379                break;
2380        }
2381        case KVM_IOEVENTFD: {
2382                struct kvm_ioeventfd data;
2383
2384                r = -EFAULT;
2385                if (copy_from_user(&data, argp, sizeof data))
2386                        goto out;
2387                r = kvm_ioeventfd(kvm, &data);
2388                break;
2389        }
2390#ifdef CONFIG_KVM_APIC_ARCHITECTURE
2391        case KVM_SET_BOOT_CPU_ID:
2392                r = 0;
2393                mutex_lock(&kvm->lock);
2394                if (atomic_read(&kvm->online_vcpus) != 0)
2395                        r = -EBUSY;
2396                else
2397                        kvm->bsp_vcpu_id = arg;
2398                mutex_unlock(&kvm->lock);
2399                break;
2400#endif
2401#ifdef CONFIG_HAVE_KVM_MSI
2402        case KVM_SIGNAL_MSI: {
2403                struct kvm_msi msi;
2404
2405                r = -EFAULT;
2406                if (copy_from_user(&msi, argp, sizeof msi))
2407                        goto out;
2408                r = kvm_send_userspace_msi(kvm, &msi);
2409                break;
2410        }
2411#endif
2412#ifdef __KVM_HAVE_IRQ_LINE
2413        case KVM_IRQ_LINE_STATUS:
2414        case KVM_IRQ_LINE: {
2415                struct kvm_irq_level irq_event;
2416
2417                r = -EFAULT;
2418                if (copy_from_user(&irq_event, argp, sizeof irq_event))
2419                        goto out;
2420
2421                r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
2422                                        ioctl == KVM_IRQ_LINE_STATUS);
2423                if (r)
2424                        goto out;
2425
2426                r = -EFAULT;
2427                if (ioctl == KVM_IRQ_LINE_STATUS) {
2428                        if (copy_to_user(argp, &irq_event, sizeof irq_event))
2429                                goto out;
2430                }
2431
2432                r = 0;
2433                break;
2434        }
2435#endif
2436#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
2437        case KVM_SET_GSI_ROUTING: {
2438                struct kvm_irq_routing routing;
2439                struct kvm_irq_routing __user *urouting;
2440                struct kvm_irq_routing_entry *entries;
2441
2442                r = -EFAULT;
2443                if (copy_from_user(&routing, argp, sizeof(routing)))
2444                        goto out;
2445                r = -EINVAL;
2446                if (routing.nr >= KVM_MAX_IRQ_ROUTES)
2447                        goto out;
2448                if (routing.flags)
2449                        goto out;
2450                r = -ENOMEM;
2451                entries = vmalloc(routing.nr * sizeof(*entries));
2452                if (!entries)
2453                        goto out;
2454                r = -EFAULT;
2455                urouting = argp;
2456                if (copy_from_user(entries, urouting->entries,
2457                                   routing.nr * sizeof(*entries)))
2458                        goto out_free_irq_routing;
2459                r = kvm_set_irq_routing(kvm, entries, routing.nr,
2460                                        routing.flags);
2461        out_free_irq_routing:
2462                vfree(entries);
2463                break;
2464        }
2465#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
2466        case KVM_CREATE_DEVICE: {
2467                struct kvm_create_device cd;
2468
2469                r = -EFAULT;
2470                if (copy_from_user(&cd, argp, sizeof(cd)))
2471                        goto out;
2472
2473                r = kvm_ioctl_create_device(kvm, &cd);
2474                if (r)
2475                        goto out;
2476
2477                r = -EFAULT;
2478                if (copy_to_user(argp, &cd, sizeof(cd)))
2479                        goto out;
2480
2481                r = 0;
2482                break;
2483        }
2484        default:
2485                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
2486                if (r == -ENOTTY)
2487                        r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
2488        }
2489out:
2490        return r;
2491}
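/*
 * The most commonly used of these VM ioctls is KVM_SET_USER_MEMORY_REGION,
 * which backs a range of guest physical addresses with memory mapped in the
 * caller's address space.  A rough userspace sketch (slot number, size and
 * guest address are arbitrary here):
 *
 *        void *mem = mmap(NULL, 0x100000, PROT_READ | PROT_WRITE,
 *                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *        struct kvm_userspace_memory_region region = {
 *                .slot            = 0,
 *                .guest_phys_addr = 0,
 *                .memory_size     = 0x100000,
 *                .userspace_addr  = (unsigned long)mem,
 *        };
 *        ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */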
2492
2493#ifdef CONFIG_COMPAT
2494struct compat_kvm_dirty_log {
2495        __u32 slot;
2496        __u32 padding1;
2497        union {
2498                compat_uptr_t dirty_bitmap; /* one bit per page */
2499                __u64 padding2;
2500        };
2501};
2502
2503static long kvm_vm_compat_ioctl(struct file *filp,
2504                           unsigned int ioctl, unsigned long arg)
2505{
2506        struct kvm *kvm = filp->private_data;
2507        int r;
2508
2509        if (kvm->mm != current->mm)
2510                return -EIO;
2511        switch (ioctl) {
2512        case KVM_GET_DIRTY_LOG: {
2513                struct compat_kvm_dirty_log compat_log;
2514                struct kvm_dirty_log log;
2515
2516                r = -EFAULT;
2517                if (copy_from_user(&compat_log, (void __user *)arg,
2518                                   sizeof(compat_log)))
2519                        goto out;
2520                log.slot         = compat_log.slot;
2521                log.padding1     = compat_log.padding1;
2522                log.padding2     = compat_log.padding2;
2523                log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
2524
2525                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
2526                break;
2527        }
2528        default:
2529                r = kvm_vm_ioctl(filp, ioctl, arg);
2530        }
2531
2532out:
2533        return r;
2534}
2535#endif
2536
2537static struct file_operations kvm_vm_fops = {
2538        .release        = kvm_vm_release,
2539        .unlocked_ioctl = kvm_vm_ioctl,
2540#ifdef CONFIG_COMPAT
2541        .compat_ioctl   = kvm_vm_compat_ioctl,
2542#endif
2543        .llseek         = noop_llseek,
2544};
2545
2546static int kvm_dev_ioctl_create_vm(unsigned long type)
2547{
2548        int r;
2549        struct kvm *kvm;
2550
2551        kvm = kvm_create_vm(type);
2552        if (IS_ERR(kvm))
2553                return PTR_ERR(kvm);
2554#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2555        r = kvm_coalesced_mmio_init(kvm);
2556        if (r < 0) {
2557                kvm_put_kvm(kvm);
2558                return r;
2559        }
2560#endif
2561        r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC);
2562        if (r < 0)
2563                kvm_put_kvm(kvm);
2564
2565        return r;
2566}
2567
2568static long kvm_dev_ioctl_check_extension_generic(long arg)
2569{
2570        switch (arg) {
2571        case KVM_CAP_USER_MEMORY:
2572        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
2573        case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
2574#ifdef CONFIG_KVM_APIC_ARCHITECTURE
2575        case KVM_CAP_SET_BOOT_CPU_ID:
2576#endif
2577        case KVM_CAP_INTERNAL_ERROR_DATA:
2578#ifdef CONFIG_HAVE_KVM_MSI
2579        case KVM_CAP_SIGNAL_MSI:
2580#endif
2581#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
2582        case KVM_CAP_IRQFD_RESAMPLE:
2583#endif
2584                return 1;
2585#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
2586        case KVM_CAP_IRQ_ROUTING:
2587                return KVM_MAX_IRQ_ROUTES;
2588#endif
2589        default:
2590                break;
2591        }
2592        return kvm_dev_ioctl_check_extension(arg);
2593}
2594
2595static long kvm_dev_ioctl(struct file *filp,
2596                          unsigned int ioctl, unsigned long arg)
2597{
2598        long r = -EINVAL;
2599
2600        switch (ioctl) {
2601        case KVM_GET_API_VERSION:
2602                r = -EINVAL;
2603                if (arg)
2604                        goto out;
2605                r = KVM_API_VERSION;
2606                break;
2607        case KVM_CREATE_VM:
2608                r = kvm_dev_ioctl_create_vm(arg);
2609                break;
2610        case KVM_CHECK_EXTENSION:
2611                r = kvm_dev_ioctl_check_extension_generic(arg);
2612                break;
2613        case KVM_GET_VCPU_MMAP_SIZE:
2614                r = -EINVAL;
2615                if (arg)
2616                        goto out;
2617                r = PAGE_SIZE;     /* struct kvm_run */
2618#ifdef CONFIG_X86
2619                r += PAGE_SIZE;    /* pio data page */
2620#endif
2621#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2622                r += PAGE_SIZE;    /* coalesced mmio ring page */
2623#endif
2624                break;
2625        case KVM_TRACE_ENABLE:
2626        case KVM_TRACE_PAUSE:
2627        case KVM_TRACE_DISABLE:
2628                r = -EOPNOTSUPP;
2629                break;
2630        default:
2631                return kvm_arch_dev_ioctl(filp, ioctl, arg);
2632        }
2633out:
2634        return r;
2635}
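/*
 * These /dev/kvm ioctls are the entry point to the whole API.  A minimal
 * userspace bring-up sequence looks roughly like this (error handling and
 * register/memory setup omitted):
 *
 *        int kvm_fd  = open("/dev/kvm", O_RDWR | O_CLOEXEC);
 *        int version = ioctl(kvm_fd, KVM_GET_API_VERSION, 0);  (should be 12)
 *        int vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *        int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *        long size   = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *        struct kvm_run *run = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                                   MAP_SHARED, vcpu_fd, 0);
 *
 *        ioctl(vcpu_fd, KVM_RUN, 0);   (returns with run->exit_reason filled in)
 */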
2636
2637static struct file_operations kvm_chardev_ops = {
2638        .unlocked_ioctl = kvm_dev_ioctl,
2639        .compat_ioctl   = kvm_dev_ioctl,
2640        .llseek         = noop_llseek,
2641};
2642
2643static struct miscdevice kvm_dev = {
2644        KVM_MINOR,
2645        "kvm",
2646        &kvm_chardev_ops,
2647};
2648
2649static void hardware_enable_nolock(void *junk)
2650{
2651        int cpu = raw_smp_processor_id();
2652        int r;
2653
2654        if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
2655                return;
2656
2657        cpumask_set_cpu(cpu, cpus_hardware_enabled);
2658
2659        r = kvm_arch_hardware_enable(NULL);
2660
2661        if (r) {
2662                cpumask_clear_cpu(cpu, cpus_hardware_enabled);
2663                atomic_inc(&hardware_enable_failed);
2664                printk(KERN_INFO "kvm: enabling virtualization on "
2665                                 "CPU%d failed\n", cpu);
2666        }
2667}
2668
2669static void hardware_enable(void)
2670{
2671        raw_spin_lock(&kvm_count_lock);
2672        if (kvm_usage_count)
2673                hardware_enable_nolock(NULL);
2674        raw_spin_unlock(&kvm_count_lock);
2675}
2676
2677static void hardware_disable_nolock(void *junk)
2678{
2679        int cpu = raw_smp_processor_id();
2680
2681        if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
2682                return;
2683        cpumask_clear_cpu(cpu, cpus_hardware_enabled);
2684        kvm_arch_hardware_disable(NULL);
2685}
2686
2687static void hardware_disable(void)
2688{
2689        raw_spin_lock(&kvm_count_lock);
2690        if (kvm_usage_count)
2691                hardware_disable_nolock(NULL);
2692        raw_spin_unlock(&kvm_count_lock);
2693}
2694
2695static void hardware_disable_all_nolock(void)
2696{
2697        BUG_ON(!kvm_usage_count);
2698
2699        kvm_usage_count--;
2700        if (!kvm_usage_count)
2701                on_each_cpu(hardware_disable_nolock, NULL, 1);
2702}
2703
2704static void hardware_disable_all(void)
2705{
2706        raw_spin_lock(&kvm_count_lock);
2707        hardware_disable_all_nolock();
2708        raw_spin_unlock(&kvm_count_lock);
2709}
2710
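/*
 * kvm_usage_count is the number of VMs currently in existence.  VM creation
 * and destruction call hardware_enable_all()/hardware_disable_all(), so the
 * virtualization extensions are enabled on every online CPU only while at
 * least one VM exists: the 0 -> 1 transition enables them and the 1 -> 0
 * transition disables them, both under kvm_count_lock.
 */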
2711static int hardware_enable_all(void)
2712{
2713        int r = 0;
2714
2715        raw_spin_lock(&kvm_count_lock);
2716
2717        kvm_usage_count++;
2718        if (kvm_usage_count == 1) {
2719                atomic_set(&hardware_enable_failed, 0);
2720                on_each_cpu(hardware_enable_nolock, NULL, 1);
2721
2722                if (atomic_read(&hardware_enable_failed)) {
2723                        hardware_disable_all_nolock();
2724                        r = -EBUSY;
2725                }
2726        }
2727
2728        raw_spin_unlock(&kvm_count_lock);
2729
2730        return r;
2731}
2732
2733static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2734                           void *v)
2735{
2736        int cpu = (long)v;
2737
2738        val &= ~CPU_TASKS_FROZEN;
2739        switch (val) {
2740        case CPU_DYING:
2741                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2742                       cpu);
2743                hardware_disable();
2744                break;
2745        case CPU_STARTING:
2746                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2747                       cpu);
2748                hardware_enable();
2749                break;
2750        }
2751        return NOTIFY_OK;
2752}
2753
2754static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
2755                      void *v)
2756{
2757        /*
2758         * Some (well, at least mine) BIOSes hang on reboot if
2759         * in VMX root mode.
2760         *
2761         * And Intel TXT requires VMX off on all CPUs when the system shuts down.
2762         */
2763        printk(KERN_INFO "kvm: exiting hardware virtualization\n");
2764        kvm_rebooting = true;
2765        on_each_cpu(hardware_disable_nolock, NULL, 1);
2766        return NOTIFY_OK;
2767}
2768
2769static struct notifier_block kvm_reboot_notifier = {
2770        .notifier_call = kvm_reboot,
2771        .priority = 0,
2772};
2773
2774static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2775{
2776        int i;
2777
2778        for (i = 0; i < bus->dev_count; i++) {
2779                struct kvm_io_device *pos = bus->range[i].dev;
2780
2781                kvm_iodevice_destructor(pos);
2782        }
2783        kfree(bus);
2784}
2785
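/*
 * Order I/O ranges by start address.  Two ranges compare equal when r1 lies
 * entirely within r2; this is how an access (passed as r1) is matched against
 * a device's registered range (r2) in the lookups below.
 */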
2786static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
2787                                 const struct kvm_io_range *r2)
2788{
2789        if (r1->addr < r2->addr)
2790                return -1;
2791        if (r1->addr + r1->len > r2->addr + r2->len)
2792                return 1;
2793        return 0;
2794}
2795
2796static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
2797{
2798        return kvm_io_bus_cmp(p1, p2);
2799}
2800
2801static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
2802                          gpa_t addr, int len)
2803{
2804        bus->range[bus->dev_count++] = (struct kvm_io_range) {
2805                .addr = addr,
2806                .len = len,
2807                .dev = dev,
2808        };
2809
2810        sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range),
2811                kvm_io_bus_sort_cmp, NULL);
2812
2813        return 0;
2814}
2815
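/*
 * Binary-search the sorted range array for a device whose registered range
 * covers [addr, addr + len), then step backwards so that the index of the
 * first of several matching entries is returned.  Returns -ENOENT if no
 * registered range covers the access.
 */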
2816static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
2817                             gpa_t addr, int len)
2818{
2819        struct kvm_io_range *range, key;
2820        int off;
2821
2822        key = (struct kvm_io_range) {
2823                .addr = addr,
2824                .len = len,
2825        };
2826
2827        range = bsearch(&key, bus->range, bus->dev_count,
2828                        sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
2829        if (range == NULL)
2830                return -ENOENT;
2831
2832        off = range - bus->range;
2833
2834        while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
2835                off--;
2836
2837        return off;
2838}
2839
2840static int __kvm_io_bus_write(struct kvm_io_bus *bus,
2841                              struct kvm_io_range *range, const void *val)
2842{
2843        int idx;
2844
2845        idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
2846        if (idx < 0)
2847                return -EOPNOTSUPP;
2848
2849        while (idx < bus->dev_count &&
2850                kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
2851                if (!kvm_iodevice_write(bus->range[idx].dev, range->addr,
2852                                        range->len, val))
2853                        return idx;
2854                idx++;
2855        }
2856
2857        return -EOPNOTSUPP;
2858}
2859
2860/* kvm_io_bus_write - called under kvm->slots_lock */
2861int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2862                     int len, const void *val)
2863{
2864        struct kvm_io_bus *bus;
2865        struct kvm_io_range range;
2866        int r;
2867
2868        range = (struct kvm_io_range) {
2869                .addr = addr,
2870                .len = len,
2871        };
2872
2873        bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
2874        r = __kvm_io_bus_write(bus, &range, val);
2875        return r < 0 ? r : 0;
2876}
2877
2878/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
2879int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2880                            int len, const void *val, long cookie)
2881{
2882        struct kvm_io_bus *bus;
2883        struct kvm_io_range range;
2884
2885        range = (struct kvm_io_range) {
2886                .addr = addr,
2887                .len = len,
2888        };
2889
2890        bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
2891
2892        /* First try the device referenced by cookie. */
2893        if ((cookie >= 0) && (cookie < bus->dev_count) &&
2894            (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
2895                if (!kvm_iodevice_write(bus->range[cookie].dev, addr, len,
2896                                        val))
2897                        return cookie;
2898
2899        /*
2900         * cookie contained garbage; fall back to search and return the
2901         * correct cookie value.
2902         */
2903        return __kvm_io_bus_write(bus, &range, val);
2904}
2905
2906static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
2907                             void *val)
2908{
2909        int idx;
2910
2911        idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
2912        if (idx < 0)
2913                return -EOPNOTSUPP;
2914
2915        while (idx < bus->dev_count &&
2916                kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
2917                if (!kvm_iodevice_read(bus->range[idx].dev, range->addr,
2918                                       range->len, val))
2919                        return idx;
2920                idx++;
2921        }
2922
2923        return -EOPNOTSUPP;
2924}
2925
2926/* kvm_io_bus_read - called under kvm->slots_lock */
2927int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2928                    int len, void *val)
2929{
2930        struct kvm_io_bus *bus;
2931        struct kvm_io_range range;
2932        int r;
2933
2934        range = (struct kvm_io_range) {
2935                .addr = addr,
2936                .len = len,
2937        };
2938
2939        bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
2940        r = __kvm_io_bus_read(bus, &range, val);
2941        return r < 0 ? r : 0;
2942}
2943
2944
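/*
 * Bus registration and unregistration never modify a kvm_io_bus in place:
 * a copy with the device added or removed is built, published with
 * rcu_assign_pointer(), and the old bus is freed only after
 * synchronize_srcu_expedited() guarantees that no reader which obtained it
 * via srcu_dereference() is still using it.
 */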
2945/* Caller must hold slots_lock. */
2946int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2947                            int len, struct kvm_io_device *dev)
2948{
2949        struct kvm_io_bus *new_bus, *bus;
2950
2951        bus = kvm->buses[bus_idx];
2952        /* exclude ioeventfds, which are already limited by the maximum number of fds */
2953        if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
2954                return -ENOSPC;
2955
2956        new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
2957                          sizeof(struct kvm_io_range)), GFP_KERNEL);
2958        if (!new_bus)
2959                return -ENOMEM;
2960        memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
2961               sizeof(struct kvm_io_range)));
2962        kvm_io_bus_insert_dev(new_bus, dev, addr, len);
2963        rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
2964        synchronize_srcu_expedited(&kvm->srcu);
2965        kfree(bus);
2966
2967        return 0;
2968}
2969
2970/* Caller must hold slots_lock. */
2971int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
2972                              struct kvm_io_device *dev)
2973{
2974        int i, r;
2975        struct kvm_io_bus *new_bus, *bus;
2976
2977        bus = kvm->buses[bus_idx];
2978        r = -ENOENT;
2979        for (i = 0; i < bus->dev_count; i++)
2980                if (bus->range[i].dev == dev) {
2981                        r = 0;
2982                        break;
2983                }
2984
2985        if (r)
2986                return r;
2987
2988        new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
2989                          sizeof(struct kvm_io_range)), GFP_KERNEL);
2990        if (!new_bus)
2991                return -ENOMEM;
2992
2993        memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
2994        new_bus->dev_count--;
2995        memcpy(new_bus->range + i, bus->range + i + 1,
2996               (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
2997
2998        rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
2999        synchronize_srcu_expedited(&kvm->srcu);
3000        kfree(bus);
3001        return r;
3002}
3003
3004static struct notifier_block kvm_cpu_notifier = {
3005        .notifier_call = kvm_cpu_hotplug,
3006};
3007
3008static int vm_stat_get(void *_offset, u64 *val)
3009{
3010        unsigned offset = (long)_offset;
3011        struct kvm *kvm;
3012
3013        *val = 0;
3014        spin_lock(&kvm_lock);
3015        list_for_each_entry(kvm, &vm_list, vm_list)
3016                *val += *(u32 *)((void *)kvm + offset);
3017        spin_unlock(&kvm_lock);
3018        return 0;
3019}
3020
3021DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
3022
3023static int vcpu_stat_get(void *_offset, u64 *val)
3024{
3025        unsigned offset = (long)_offset;
3026        struct kvm *kvm;
3027        struct kvm_vcpu *vcpu;
3028        int i;
3029
3030        *val = 0;
3031        spin_lock(&kvm_lock);
3032        list_for_each_entry(kvm, &vm_list, vm_list)
3033                kvm_for_each_vcpu(i, vcpu, kvm)
3034                        *val += *(u32 *)((void *)vcpu + offset);
3035
3036        spin_unlock(&kvm_lock);
3037        return 0;
3038}
3039
3040DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
3041
3042static const struct file_operations *stat_fops[] = {
3043        [KVM_STAT_VCPU] = &vcpu_stat_fops,
3044        [KVM_STAT_VM]   = &vm_stat_fops,
3045};
3046
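/*
 * With debugfs mounted in its usual location, the statistics registered below
 * appear as read-only files under /sys/kernel/debug/kvm/, one per entry in
 * debugfs_entries.
 */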
3047static int kvm_init_debug(void)
3048{
3049        int r = -EEXIST;
3050        struct kvm_stats_debugfs_item *p;
3051
3052        kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
3053        if (kvm_debugfs_dir == NULL)
3054                goto out;
3055
3056        for (p = debugfs_entries; p->name; ++p) {
3057                p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
3058                                                (void *)(long)p->offset,
3059                                                stat_fops[p->kind]);
3060                if (p->dentry == NULL)
3061                        goto out_dir;
3062        }
3063
3064        return 0;
3065
3066out_dir:
3067        debugfs_remove_recursive(kvm_debugfs_dir);
3068out:
3069        return r;
3070}
3071
3072static void kvm_exit_debug(void)
3073{
3074        struct kvm_stats_debugfs_item *p;
3075
3076        for (p = debugfs_entries; p->name; ++p)
3077                debugfs_remove(p->dentry);
3078        debugfs_remove(kvm_debugfs_dir);
3079}
3080
3081static int kvm_suspend(void)
3082{
3083        if (kvm_usage_count)
3084                hardware_disable_nolock(NULL);
3085        return 0;
3086}
3087
3088static void kvm_resume(void)
3089{
3090        if (kvm_usage_count) {
3091                WARN_ON(raw_spin_is_locked(&kvm_count_lock));
3092                hardware_enable_nolock(NULL);
3093        }
3094}
3095
3096static struct syscore_ops kvm_syscore_ops = {
3097        .suspend = kvm_suspend,
3098        .resume = kvm_resume,
3099};
3100
3101static inline
3102struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
3103{
3104        return container_of(pn, struct kvm_vcpu, preempt_notifier);
3105}
3106
3107static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
3108{
3109        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
3110        if (vcpu->preempted)
3111                vcpu->preempted = false;
3112
3113        kvm_arch_vcpu_load(vcpu, cpu);
3114}
3115
3116static void kvm_sched_out(struct preempt_notifier *pn,
3117                          struct task_struct *next)
3118{
3119        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
3120
3121        if (current->state == TASK_RUNNING)
3122                vcpu->preempted = true;
3123        kvm_arch_vcpu_put(vcpu);
3124}
3125
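/*
 * kvm_init() is called from the architecture module's init routine, passing
 * its vendor-specific vcpu size and alignment.  On x86, for instance, the VMX
 * module does roughly:
 *
 *        kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *                 __alignof__(struct vcpu_vmx), THIS_MODULE);
 */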
3126int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
3127                  struct module *module)
3128{
3129        int r;
3130        int cpu;
3131
3132        r = kvm_arch_init(opaque);
3133        if (r)
3134                goto out_fail;
3135
3136        /*
3137         * kvm_arch_init makes sure there's at most one caller
3138         * for architectures that support multiple implementations,
3139         * like Intel and AMD on x86.
3140         * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
3141         * conflicts in case kvm is already set up for another implementation.
3142         */
3143        r = kvm_irqfd_init();
3144        if (r)
3145                goto out_irqfd;
3146
3147        if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
3148                r = -ENOMEM;
3149                goto out_free_0;
3150        }
3151
3152        r = kvm_arch_hardware_setup();
3153        if (r < 0)
3154                goto out_free_0a;
3155
3156        for_each_online_cpu(cpu) {
3157                smp_call_function_single(cpu,
3158                                kvm_arch_check_processor_compat,
3159                                &r, 1);
3160                if (r < 0)
3161                        goto out_free_1;
3162        }
3163
3164        r = register_cpu_notifier(&kvm_cpu_notifier);
3165        if (r)
3166                goto out_free_2;
3167        register_reboot_notifier(&kvm_reboot_notifier);
3168
3169        /* A kmem cache lets us meet the alignment requirements of fx_save. */
3170        if (!vcpu_align)
3171                vcpu_align = __alignof__(struct kvm_vcpu);
3172        kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
3173                                           0, NULL);
3174        if (!kvm_vcpu_cache) {
3175                r = -ENOMEM;
3176                goto out_free_3;
3177        }
3178
3179        r = kvm_async_pf_init();
3180        if (r)
3181                goto out_free;
3182
3183        kvm_chardev_ops.owner = module;
3184        kvm_vm_fops.owner = module;
3185        kvm_vcpu_fops.owner = module;
3186
3187        r = misc_register(&kvm_dev);
3188        if (r) {
3189                printk(KERN_ERR "kvm: misc device register failed\n");
3190                goto out_unreg;
3191        }
3192
3193        register_syscore_ops(&kvm_syscore_ops);
3194
3195        kvm_preempt_ops.sched_in = kvm_sched_in;
3196        kvm_preempt_ops.sched_out = kvm_sched_out;
3197
3198        r = kvm_init_debug();
3199        if (r) {
3200                printk(KERN_ERR "kvm: create debugfs files failed\n");
3201                goto out_undebugfs;
3202        }
3203
3204        return 0;
3205
3206out_undebugfs:
3207        unregister_syscore_ops(&kvm_syscore_ops);
3208        misc_deregister(&kvm_dev);
3209out_unreg:
3210        kvm_async_pf_deinit();
3211out_free:
3212        kmem_cache_destroy(kvm_vcpu_cache);
3213out_free_3:
3214        unregister_reboot_notifier(&kvm_reboot_notifier);
3215        unregister_cpu_notifier(&kvm_cpu_notifier);
3216out_free_2:
3217out_free_1:
3218        kvm_arch_hardware_unsetup();
3219out_free_0a:
3220        free_cpumask_var(cpus_hardware_enabled);
3221out_free_0:
3222        kvm_irqfd_exit();
3223out_irqfd:
3224        kvm_arch_exit();
3225out_fail:
3226        return r;
3227}
3228EXPORT_SYMBOL_GPL(kvm_init);
3229
3230void kvm_exit(void)
3231{
3232        kvm_exit_debug();
3233        misc_deregister(&kvm_dev);
3234        kmem_cache_destroy(kvm_vcpu_cache);
3235        kvm_async_pf_deinit();
3236        unregister_syscore_ops(&kvm_syscore_ops);
3237        unregister_reboot_notifier(&kvm_reboot_notifier);
3238        unregister_cpu_notifier(&kvm_cpu_notifier);
3239        on_each_cpu(hardware_disable_nolock, NULL, 1);
3240        kvm_arch_hardware_unsetup();
3241        kvm_arch_exit();
3242        kvm_irqfd_exit();
3243        free_cpumask_var(cpus_hardware_enabled);
3244}
3245EXPORT_SYMBOL_GPL(kvm_exit);
3246