linux/virt/kvm/kvm_main.c
   1/*
   2 * Kernel-based Virtual Machine driver for Linux
   3 *
   4 * This module enables machines with Intel VT-x extensions to run virtual
   5 * machines without emulation or binary translation.
   6 *
   7 * Copyright (C) 2006 Qumranet, Inc.
   8 *
   9 * Authors:
  10 *   Avi Kivity   <avi@qumranet.com>
  11 *   Yaniv Kamay  <yaniv@qumranet.com>
  12 *
  13 * This work is licensed under the terms of the GNU GPL, version 2.  See
  14 * the COPYING file in the top-level directory.
  15 *
  16 */
  17
  18#include "iodev.h"
  19
  20#include <linux/kvm_host.h>
  21#include <linux/kvm.h>
  22#include <linux/module.h>
  23#include <linux/errno.h>
  24#include <linux/percpu.h>
  25#include <linux/gfp.h>
  26#include <linux/mm.h>
  27#include <linux/miscdevice.h>
  28#include <linux/vmalloc.h>
  29#include <linux/reboot.h>
  30#include <linux/debugfs.h>
  31#include <linux/highmem.h>
  32#include <linux/file.h>
  33#include <linux/sysdev.h>
  34#include <linux/cpu.h>
  35#include <linux/sched.h>
  36#include <linux/cpumask.h>
  37#include <linux/smp.h>
  38#include <linux/anon_inodes.h>
  39#include <linux/profile.h>
  40#include <linux/kvm_para.h>
  41#include <linux/pagemap.h>
  42#include <linux/mman.h>
  43#include <linux/swap.h>
  44#include <linux/bitops.h>
  45#include <linux/spinlock.h>
  46
  47#include <asm/processor.h>
  48#include <asm/io.h>
  49#include <asm/uaccess.h>
  50#include <asm/pgtable.h>
  51
  52#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
  53#include "coalesced_mmio.h"
  54#endif
  55
  56#ifdef KVM_CAP_DEVICE_ASSIGNMENT
  57#include <linux/pci.h>
  58#include <linux/interrupt.h>
  59#include "irq.h"
  60#endif
  61
  62#define CREATE_TRACE_POINTS
  63#include <trace/events/kvm.h>
  64
  65MODULE_AUTHOR("Qumranet");
  66MODULE_LICENSE("GPL");
  67
  68/*
  69 * Ordering of locks:
  70 *
  71 *              kvm->slots_lock --> kvm->lock --> kvm->irq_lock
  72 */
  73
  74DEFINE_SPINLOCK(kvm_lock);
  75LIST_HEAD(vm_list);
  76
  77static cpumask_var_t cpus_hardware_enabled;
  78
  79struct kmem_cache *kvm_vcpu_cache;
  80EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
  81
  82static __read_mostly struct preempt_ops kvm_preempt_ops;
  83
  84struct dentry *kvm_debugfs_dir;
  85
  86static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
  87                           unsigned long arg);
  88
  89static bool kvm_rebooting;
  90
  91static bool largepages_enabled = true;
  92
  93#ifdef KVM_CAP_DEVICE_ASSIGNMENT
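/*
 * Find the assigned device whose assigned_dev_id matches, scanning the
 * list headed at @head.  Returns NULL when no entry matches.
 */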
  94static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
  95                                                      int assigned_dev_id)
  96{
  97        struct list_head *ptr;
  98        struct kvm_assigned_dev_kernel *match;
  99
 100        list_for_each(ptr, head) {
 101                match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
 102                if (match->assigned_dev_id == assigned_dev_id)
 103                        return match;
 104        }
 105        return NULL;
 106}
 107
 108static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
 109                                    *assigned_dev, int irq)
 110{
 111        int i, index;
 112        struct msix_entry *host_msix_entries;
 113
 114        host_msix_entries = assigned_dev->host_msix_entries;
 115
 116        index = -1;
 117        for (i = 0; i < assigned_dev->entries_nr; i++)
 118                if (irq == host_msix_entries[i].vector) {
 119                        index = i;
 120                        break;
 121                }
 122        if (index < 0) {
  123                printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");
 124                return 0;
 125        }
 126
 127        return index;
 128}
 129
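/*
 * Deferred-work handler for an assigned device interrupt: with the
 * device's lock held it injects into the guest either every pending
 * MSI-X vector or, for INTx/MSI, the single configured guest_irq.
 */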
 130static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
 131{
 132        struct kvm_assigned_dev_kernel *assigned_dev;
 133        struct kvm *kvm;
 134        int i;
 135
 136        assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
 137                                    interrupt_work);
 138        kvm = assigned_dev->kvm;
 139
 140        mutex_lock(&kvm->irq_lock);
 141        spin_lock_irq(&assigned_dev->assigned_dev_lock);
 142        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
 143                struct kvm_guest_msix_entry *guest_entries =
 144                        assigned_dev->guest_msix_entries;
 145                for (i = 0; i < assigned_dev->entries_nr; i++) {
 146                        if (!(guest_entries[i].flags &
 147                                        KVM_ASSIGNED_MSIX_PENDING))
 148                                continue;
 149                        guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
 150                        kvm_set_irq(assigned_dev->kvm,
 151                                    assigned_dev->irq_source_id,
 152                                    guest_entries[i].vector, 1);
 153                }
 154        } else
 155                kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
 156                            assigned_dev->guest_irq, 1);
 157
 158        spin_unlock_irq(&assigned_dev->assigned_dev_lock);
 159        mutex_unlock(&assigned_dev->kvm->irq_lock);
 160}
 161
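/*
 * Host-side interrupt handler for an assigned device.  For MSI-X the
 * matching guest entry is marked pending; the actual injection is
 * deferred to interrupt_work.  For guest INTx the host line is masked
 * until the guest acks it (see kvm_assigned_dev_ack_irq).
 */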
 162static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
 163{
 164        unsigned long flags;
 165        struct kvm_assigned_dev_kernel *assigned_dev =
 166                (struct kvm_assigned_dev_kernel *) dev_id;
 167
 168        spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
 169        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
 170                int index = find_index_from_host_irq(assigned_dev, irq);
 171                if (index < 0)
 172                        goto out;
 173                assigned_dev->guest_msix_entries[index].flags |=
 174                        KVM_ASSIGNED_MSIX_PENDING;
 175        }
 176
 177        schedule_work(&assigned_dev->interrupt_work);
 178
 179        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
 180                disable_irq_nosync(irq);
 181                assigned_dev->host_irq_disabled = true;
 182        }
 183
 184out:
 185        spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
 186        return IRQ_HANDLED;
 187}
 188
 189/* Ack the irq line for an assigned device */
 190static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 191{
 192        struct kvm_assigned_dev_kernel *dev;
 193        unsigned long flags;
 194
 195        if (kian->gsi == -1)
 196                return;
 197
 198        dev = container_of(kian, struct kvm_assigned_dev_kernel,
 199                           ack_notifier);
 200
 201        kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
 202
 203        /* The guest irq may be shared so this ack may be
 204         * from another device.
 205         */
 206        spin_lock_irqsave(&dev->assigned_dev_lock, flags);
 207        if (dev->host_irq_disabled) {
 208                enable_irq(dev->host_irq);
 209                dev->host_irq_disabled = false;
 210        }
 211        spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
 212}
 213
 214static void deassign_guest_irq(struct kvm *kvm,
 215                               struct kvm_assigned_dev_kernel *assigned_dev)
 216{
 217        kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
 218        assigned_dev->ack_notifier.gsi = -1;
 219
 220        if (assigned_dev->irq_source_id != -1)
 221                kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
 222        assigned_dev->irq_source_id = -1;
 223        assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
 224}
 225
  226/* The function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
 227static void deassign_host_irq(struct kvm *kvm,
 228                              struct kvm_assigned_dev_kernel *assigned_dev)
 229{
  230        /*
  231         * In kvm_free_device_irq, cancel_work_sync() returns true if:
  232         * 1. the work was scheduled and has been cancelled, or
  233         * 2. the work callback has been executed.
  234         *
  235         * The first case guarantees that the irq is disabled and no more
  236         * events will arrive.  In the second case the irq may still be
  237         * enabled (e.g. for MSI), so disable it here to prevent further
  238         * events.  Note this may result in a nested disable if the
  239         * interrupt type is INTx, but that is fine since we are about to
  240         * free it.
  241         *
  242         * If this function is called as part of VM destruction, make sure
  243         * the kvm state is still valid here; we may also have to wait for
  244         * interrupt_work to complete.
  245         */
 246        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
 247                int i;
 248                for (i = 0; i < assigned_dev->entries_nr; i++)
 249                        disable_irq_nosync(assigned_dev->
 250                                           host_msix_entries[i].vector);
 251
 252                cancel_work_sync(&assigned_dev->interrupt_work);
 253
 254                for (i = 0; i < assigned_dev->entries_nr; i++)
 255                        free_irq(assigned_dev->host_msix_entries[i].vector,
 256                                 (void *)assigned_dev);
 257
 258                assigned_dev->entries_nr = 0;
 259                kfree(assigned_dev->host_msix_entries);
 260                kfree(assigned_dev->guest_msix_entries);
 261                pci_disable_msix(assigned_dev->dev);
 262        } else {
 263                /* Deal with MSI and INTx */
 264                disable_irq_nosync(assigned_dev->host_irq);
 265                cancel_work_sync(&assigned_dev->interrupt_work);
 266
 267                free_irq(assigned_dev->host_irq, (void *)assigned_dev);
 268
 269                if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
 270                        pci_disable_msi(assigned_dev->dev);
 271        }
 272
 273        assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
 274}
 275
 276static int kvm_deassign_irq(struct kvm *kvm,
 277                            struct kvm_assigned_dev_kernel *assigned_dev,
 278                            unsigned long irq_requested_type)
 279{
 280        unsigned long guest_irq_type, host_irq_type;
 281
 282        if (!irqchip_in_kernel(kvm))
 283                return -EINVAL;
 284        /* no irq assignment to deassign */
 285        if (!assigned_dev->irq_requested_type)
 286                return -ENXIO;
 287
 288        host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
 289        guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;
 290
 291        if (host_irq_type)
 292                deassign_host_irq(kvm, assigned_dev);
 293        if (guest_irq_type)
 294                deassign_guest_irq(kvm, assigned_dev);
 295
 296        return 0;
 297}
 298
 299static void kvm_free_assigned_irq(struct kvm *kvm,
 300                                  struct kvm_assigned_dev_kernel *assigned_dev)
 301{
 302        kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
 303}
 304
 305static void kvm_free_assigned_device(struct kvm *kvm,
 306                                     struct kvm_assigned_dev_kernel
 307                                     *assigned_dev)
 308{
 309        kvm_free_assigned_irq(kvm, assigned_dev);
 310
 311        pci_reset_function(assigned_dev->dev);
 312
 313        pci_release_regions(assigned_dev->dev);
 314        pci_disable_device(assigned_dev->dev);
 315        pci_dev_put(assigned_dev->dev);
 316
 317        list_del(&assigned_dev->list);
 318        kfree(assigned_dev);
 319}
 320
 321void kvm_free_all_assigned_devices(struct kvm *kvm)
 322{
 323        struct list_head *ptr, *ptr2;
 324        struct kvm_assigned_dev_kernel *assigned_dev;
 325
 326        list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
 327                assigned_dev = list_entry(ptr,
 328                                          struct kvm_assigned_dev_kernel,
 329                                          list);
 330
 331                kvm_free_assigned_device(kvm, assigned_dev);
 332        }
 333}
 334
 335static int assigned_device_enable_host_intx(struct kvm *kvm,
 336                                            struct kvm_assigned_dev_kernel *dev)
 337{
 338        dev->host_irq = dev->dev->irq;
 339        /* Even though this is PCI, we don't want to use shared
 340         * interrupts. Sharing host devices with guest-assigned devices
 341         * on the same interrupt line is not a happy situation: there
 342         * are going to be long delays in accepting, acking, etc.
 343         */
 344        if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
 345                        0, "kvm_assigned_intx_device", (void *)dev))
 346                return -EIO;
 347        return 0;
 348}
 349
 350#ifdef __KVM_HAVE_MSI
 351static int assigned_device_enable_host_msi(struct kvm *kvm,
 352                                           struct kvm_assigned_dev_kernel *dev)
 353{
 354        int r;
 355
 356        if (!dev->dev->msi_enabled) {
 357                r = pci_enable_msi(dev->dev);
 358                if (r)
 359                        return r;
 360        }
 361
 362        dev->host_irq = dev->dev->irq;
 363        if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
 364                        "kvm_assigned_msi_device", (void *)dev)) {
 365                pci_disable_msi(dev->dev);
 366                return -EIO;
 367        }
 368
 369        return 0;
 370}
 371#endif
 372
 373#ifdef __KVM_HAVE_MSIX
 374static int assigned_device_enable_host_msix(struct kvm *kvm,
 375                                            struct kvm_assigned_dev_kernel *dev)
 376{
 377        int i, r = -EINVAL;
 378
 379        /* host_msix_entries and guest_msix_entries should have been
 380         * initialized */
 381        if (dev->entries_nr == 0)
 382                return r;
 383
 384        r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
 385        if (r)
 386                return r;
 387
 388        for (i = 0; i < dev->entries_nr; i++) {
 389                r = request_irq(dev->host_msix_entries[i].vector,
 390                                kvm_assigned_dev_intr, 0,
 391                                "kvm_assigned_msix_device",
 392                                (void *)dev);
 393                /* FIXME: free requested_irq's on failure */
 394                if (r)
 395                        return r;
 396        }
 397
 398        return 0;
 399}
 400
 401#endif
 402
 403static int assigned_device_enable_guest_intx(struct kvm *kvm,
 404                                struct kvm_assigned_dev_kernel *dev,
 405                                struct kvm_assigned_irq *irq)
 406{
 407        dev->guest_irq = irq->guest_irq;
 408        dev->ack_notifier.gsi = irq->guest_irq;
 409        return 0;
 410}
 411
 412#ifdef __KVM_HAVE_MSI
 413static int assigned_device_enable_guest_msi(struct kvm *kvm,
 414                        struct kvm_assigned_dev_kernel *dev,
 415                        struct kvm_assigned_irq *irq)
 416{
 417        dev->guest_irq = irq->guest_irq;
 418        dev->ack_notifier.gsi = -1;
 419        dev->host_irq_disabled = false;
 420        return 0;
 421}
 422#endif
 423#ifdef __KVM_HAVE_MSIX
 424static int assigned_device_enable_guest_msix(struct kvm *kvm,
 425                        struct kvm_assigned_dev_kernel *dev,
 426                        struct kvm_assigned_irq *irq)
 427{
 428        dev->guest_irq = irq->guest_irq;
 429        dev->ack_notifier.gsi = -1;
 430        dev->host_irq_disabled = false;
 431        return 0;
 432}
 433#endif
 434
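/*
 * Wire up the host side of an assigned interrupt (INTx, MSI or MSI-X,
 * depending on @host_irq_type).  Fails with -EEXIST if a host irq type
 * is already configured for the device.
 */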
 435static int assign_host_irq(struct kvm *kvm,
 436                           struct kvm_assigned_dev_kernel *dev,
 437                           __u32 host_irq_type)
 438{
 439        int r = -EEXIST;
 440
 441        if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
 442                return r;
 443
 444        switch (host_irq_type) {
 445        case KVM_DEV_IRQ_HOST_INTX:
 446                r = assigned_device_enable_host_intx(kvm, dev);
 447                break;
 448#ifdef __KVM_HAVE_MSI
 449        case KVM_DEV_IRQ_HOST_MSI:
 450                r = assigned_device_enable_host_msi(kvm, dev);
 451                break;
 452#endif
 453#ifdef __KVM_HAVE_MSIX
 454        case KVM_DEV_IRQ_HOST_MSIX:
 455                r = assigned_device_enable_host_msix(kvm, dev);
 456                break;
 457#endif
 458        default:
 459                r = -EINVAL;
 460        }
 461
 462        if (!r)
 463                dev->irq_requested_type |= host_irq_type;
 464
 465        return r;
 466}
 467
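/*
 * Wire up the guest side of an assigned interrupt: allocate an irq
 * source id, record the guest irq, and register the ack notifier.  On
 * failure the source id is released again.
 */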
 468static int assign_guest_irq(struct kvm *kvm,
 469                            struct kvm_assigned_dev_kernel *dev,
 470                            struct kvm_assigned_irq *irq,
 471                            unsigned long guest_irq_type)
 472{
 473        int id;
 474        int r = -EEXIST;
 475
 476        if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
 477                return r;
 478
 479        id = kvm_request_irq_source_id(kvm);
 480        if (id < 0)
 481                return id;
 482
 483        dev->irq_source_id = id;
 484
 485        switch (guest_irq_type) {
 486        case KVM_DEV_IRQ_GUEST_INTX:
 487                r = assigned_device_enable_guest_intx(kvm, dev, irq);
 488                break;
 489#ifdef __KVM_HAVE_MSI
 490        case KVM_DEV_IRQ_GUEST_MSI:
 491                r = assigned_device_enable_guest_msi(kvm, dev, irq);
 492                break;
 493#endif
 494#ifdef __KVM_HAVE_MSIX
 495        case KVM_DEV_IRQ_GUEST_MSIX:
 496                r = assigned_device_enable_guest_msix(kvm, dev, irq);
 497                break;
 498#endif
 499        default:
 500                r = -EINVAL;
 501        }
 502
 503        if (!r) {
 504                dev->irq_requested_type |= guest_irq_type;
 505                kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
 506        } else
 507                kvm_free_irq_source_id(kvm, dev->irq_source_id);
 508
 509        return r;
 510}
 511
 512/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
 513static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
 514                                   struct kvm_assigned_irq *assigned_irq)
 515{
 516        int r = -EINVAL;
 517        struct kvm_assigned_dev_kernel *match;
 518        unsigned long host_irq_type, guest_irq_type;
 519
 520        if (!capable(CAP_SYS_RAWIO))
 521                return -EPERM;
 522
 523        if (!irqchip_in_kernel(kvm))
 524                return r;
 525
 526        mutex_lock(&kvm->lock);
 527        r = -ENODEV;
 528        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
 529                                      assigned_irq->assigned_dev_id);
 530        if (!match)
 531                goto out;
 532
 533        host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
 534        guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);
 535
 536        r = -EINVAL;
 537        /* can only assign one type at a time */
 538        if (hweight_long(host_irq_type) > 1)
 539                goto out;
 540        if (hweight_long(guest_irq_type) > 1)
 541                goto out;
 542        if (host_irq_type == 0 && guest_irq_type == 0)
 543                goto out;
 544
 545        r = 0;
 546        if (host_irq_type)
 547                r = assign_host_irq(kvm, match, host_irq_type);
 548        if (r)
 549                goto out;
 550
 551        if (guest_irq_type)
 552                r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
 553out:
 554        mutex_unlock(&kvm->lock);
 555        return r;
 556}
 557
 558static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
 559                                         struct kvm_assigned_irq
 560                                         *assigned_irq)
 561{
 562        int r = -ENODEV;
 563        struct kvm_assigned_dev_kernel *match;
 564
 565        mutex_lock(&kvm->lock);
 566
 567        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
 568                                      assigned_irq->assigned_dev_id);
 569        if (!match)
 570                goto out;
 571
 572        r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
 573out:
 574        mutex_unlock(&kvm->lock);
 575        return r;
 576}
 577
 578static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 579                                      struct kvm_assigned_pci_dev *assigned_dev)
 580{
 581        int r = 0;
 582        struct kvm_assigned_dev_kernel *match;
 583        struct pci_dev *dev;
 584
 585        down_read(&kvm->slots_lock);
 586        mutex_lock(&kvm->lock);
 587
 588        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
 589                                      assigned_dev->assigned_dev_id);
 590        if (match) {
 591                /* device already assigned */
 592                r = -EEXIST;
 593                goto out;
 594        }
 595
 596        match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
 597        if (match == NULL) {
 598                printk(KERN_INFO "%s: Couldn't allocate memory\n",
 599                       __func__);
 600                r = -ENOMEM;
 601                goto out;
 602        }
 603        dev = pci_get_bus_and_slot(assigned_dev->busnr,
 604                                   assigned_dev->devfn);
 605        if (!dev) {
 606                printk(KERN_INFO "%s: host device not found\n", __func__);
 607                r = -EINVAL;
 608                goto out_free;
 609        }
 610        if (pci_enable_device(dev)) {
 611                printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
 612                r = -EBUSY;
 613                goto out_put;
 614        }
 615        r = pci_request_regions(dev, "kvm_assigned_device");
 616        if (r) {
 617                printk(KERN_INFO "%s: Could not get access to device regions\n",
 618                       __func__);
 619                goto out_disable;
 620        }
 621
 622        pci_reset_function(dev);
 623
 624        match->assigned_dev_id = assigned_dev->assigned_dev_id;
 625        match->host_busnr = assigned_dev->busnr;
 626        match->host_devfn = assigned_dev->devfn;
 627        match->flags = assigned_dev->flags;
 628        match->dev = dev;
 629        spin_lock_init(&match->assigned_dev_lock);
 630        match->irq_source_id = -1;
 631        match->kvm = kvm;
 632        match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
 633        INIT_WORK(&match->interrupt_work,
 634                  kvm_assigned_dev_interrupt_work_handler);
 635
 636        list_add(&match->list, &kvm->arch.assigned_dev_head);
 637
 638        if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
 639                if (!kvm->arch.iommu_domain) {
 640                        r = kvm_iommu_map_guest(kvm);
 641                        if (r)
 642                                goto out_list_del;
 643                }
 644                r = kvm_assign_device(kvm, match);
 645                if (r)
 646                        goto out_list_del;
 647        }
 648
 649out:
 650        mutex_unlock(&kvm->lock);
 651        up_read(&kvm->slots_lock);
 652        return r;
 653out_list_del:
 654        list_del(&match->list);
 655        pci_release_regions(dev);
 656out_disable:
 657        pci_disable_device(dev);
 658out_put:
 659        pci_dev_put(dev);
 660out_free:
 661        kfree(match);
 662        mutex_unlock(&kvm->lock);
 663        up_read(&kvm->slots_lock);
 664        return r;
 665}
 666#endif
 667
 668#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
 669static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
 670                struct kvm_assigned_pci_dev *assigned_dev)
 671{
 672        int r = 0;
 673        struct kvm_assigned_dev_kernel *match;
 674
 675        mutex_lock(&kvm->lock);
 676
 677        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
 678                                      assigned_dev->assigned_dev_id);
 679        if (!match) {
 680                printk(KERN_INFO "%s: device hasn't been assigned before, "
 681                  "so cannot be deassigned\n", __func__);
 682                r = -EINVAL;
 683                goto out;
 684        }
 685
 686        if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
 687                kvm_deassign_device(kvm, match);
 688
 689        kvm_free_assigned_device(kvm, match);
 690
 691out:
 692        mutex_unlock(&kvm->lock);
 693        return r;
 694}
 695#endif
 696
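/*
 * A pfn is treated as MMIO if it has no struct page at all, or if the
 * compound head of its page is marked reserved.
 */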
 697inline int kvm_is_mmio_pfn(pfn_t pfn)
 698{
 699        if (pfn_valid(pfn)) {
 700                struct page *page = compound_head(pfn_to_page(pfn));
 701                return PageReserved(page);
 702        }
 703
 704        return true;
 705}
 706
 707/*
 708 * Switches to specified vcpu, until a matching vcpu_put()
 709 */
 710void vcpu_load(struct kvm_vcpu *vcpu)
 711{
 712        int cpu;
 713
 714        mutex_lock(&vcpu->mutex);
 715        cpu = get_cpu();
 716        preempt_notifier_register(&vcpu->preempt_notifier);
 717        kvm_arch_vcpu_load(vcpu, cpu);
 718        put_cpu();
 719}
 720
 721void vcpu_put(struct kvm_vcpu *vcpu)
 722{
 723        preempt_disable();
 724        kvm_arch_vcpu_put(vcpu);
 725        preempt_notifier_unregister(&vcpu->preempt_notifier);
 726        preempt_enable();
 727        mutex_unlock(&vcpu->mutex);
 728}
 729
 730static void ack_flush(void *_completed)
 731{
 732}
 733
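/*
 * Set @req on every vcpu and IPI the cpus that are currently running a
 * vcpu which did not already have the request pending (falling back to
 * a broadcast if the temporary cpumask cannot be allocated).  Returns
 * false only when no cpu had to be signalled.
 */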
 734static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 735{
 736        int i, cpu, me;
 737        cpumask_var_t cpus;
 738        bool called = true;
 739        struct kvm_vcpu *vcpu;
 740
 741        zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 742
 743        spin_lock(&kvm->requests_lock);
 744        me = smp_processor_id();
 745        kvm_for_each_vcpu(i, vcpu, kvm) {
 746                if (test_and_set_bit(req, &vcpu->requests))
 747                        continue;
 748                cpu = vcpu->cpu;
 749                if (cpus != NULL && cpu != -1 && cpu != me)
 750                        cpumask_set_cpu(cpu, cpus);
 751        }
 752        if (unlikely(cpus == NULL))
 753                smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
 754        else if (!cpumask_empty(cpus))
 755                smp_call_function_many(cpus, ack_flush, NULL, 1);
 756        else
 757                called = false;
 758        spin_unlock(&kvm->requests_lock);
 759        free_cpumask_var(cpus);
 760        return called;
 761}
 762
 763void kvm_flush_remote_tlbs(struct kvm *kvm)
 764{
 765        if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 766                ++kvm->stat.remote_tlb_flush;
 767}
 768
 769void kvm_reload_remote_mmus(struct kvm *kvm)
 770{
 771        make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
 772}
 773
 774int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 775{
 776        struct page *page;
 777        int r;
 778
 779        mutex_init(&vcpu->mutex);
 780        vcpu->cpu = -1;
 781        vcpu->kvm = kvm;
 782        vcpu->vcpu_id = id;
 783        init_waitqueue_head(&vcpu->wq);
 784
 785        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 786        if (!page) {
 787                r = -ENOMEM;
 788                goto fail;
 789        }
 790        vcpu->run = page_address(page);
 791
 792        r = kvm_arch_vcpu_init(vcpu);
 793        if (r < 0)
 794                goto fail_free_run;
 795        return 0;
 796
 797fail_free_run:
 798        free_page((unsigned long)vcpu->run);
 799fail:
 800        return r;
 801}
 802EXPORT_SYMBOL_GPL(kvm_vcpu_init);
 803
 804void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
 805{
 806        kvm_arch_vcpu_uninit(vcpu);
 807        free_page((unsigned long)vcpu->run);
 808}
 809EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
 810
 811#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 812static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
 813{
 814        return container_of(mn, struct kvm, mmu_notifier);
 815}
 816
 817static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
 818                                             struct mm_struct *mm,
 819                                             unsigned long address)
 820{
 821        struct kvm *kvm = mmu_notifier_to_kvm(mn);
 822        int need_tlb_flush;
 823
 824        /*
 825         * When ->invalidate_page runs, the linux pte has been zapped
 826         * already but the page is still allocated until
 827         * ->invalidate_page returns. So if we increase the sequence
 828         * here the kvm page fault will notice if the spte can't be
 829         * established because the page is going to be freed. If
 830         * instead the kvm page fault establishes the spte before
 831         * ->invalidate_page runs, kvm_unmap_hva will release it
 832         * before returning.
 833         *
  834         * The sequence increase only needs to be seen at spin_unlock
 835         * time, and not at spin_lock time.
 836         *
 837         * Increasing the sequence after the spin_unlock would be
 838         * unsafe because the kvm page fault could then establish the
 839         * pte after kvm_unmap_hva returned, without noticing the page
 840         * is going to be freed.
 841         */
 842        spin_lock(&kvm->mmu_lock);
 843        kvm->mmu_notifier_seq++;
 844        need_tlb_flush = kvm_unmap_hva(kvm, address);
 845        spin_unlock(&kvm->mmu_lock);
 846
  847        /* we have to flush the tlb before the pages can be freed */
 848        if (need_tlb_flush)
 849                kvm_flush_remote_tlbs(kvm);
 850
 851}
 852
 853static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 854                                        struct mm_struct *mm,
 855                                        unsigned long address,
 856                                        pte_t pte)
 857{
 858        struct kvm *kvm = mmu_notifier_to_kvm(mn);
 859
 860        spin_lock(&kvm->mmu_lock);
 861        kvm->mmu_notifier_seq++;
 862        kvm_set_spte_hva(kvm, address, pte);
 863        spin_unlock(&kvm->mmu_lock);
 864}
 865
 866static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 867                                                    struct mm_struct *mm,
 868                                                    unsigned long start,
 869                                                    unsigned long end)
 870{
 871        struct kvm *kvm = mmu_notifier_to_kvm(mn);
 872        int need_tlb_flush = 0;
 873
 874        spin_lock(&kvm->mmu_lock);
 875        /*
 876         * The count increase must become visible at unlock time as no
 877         * spte can be established without taking the mmu_lock and
 878         * count is also read inside the mmu_lock critical section.
 879         */
 880        kvm->mmu_notifier_count++;
 881        for (; start < end; start += PAGE_SIZE)
 882                need_tlb_flush |= kvm_unmap_hva(kvm, start);
 883        spin_unlock(&kvm->mmu_lock);
 884
  885        /* we have to flush the tlb before the pages can be freed */
 886        if (need_tlb_flush)
 887                kvm_flush_remote_tlbs(kvm);
 888}
 889
 890static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 891                                                  struct mm_struct *mm,
 892                                                  unsigned long start,
 893                                                  unsigned long end)
 894{
 895        struct kvm *kvm = mmu_notifier_to_kvm(mn);
 896
 897        spin_lock(&kvm->mmu_lock);
 898        /*
 899         * This sequence increase will notify the kvm page fault that
 900         * the page that is going to be mapped in the spte could have
 901         * been freed.
 902         */
 903        kvm->mmu_notifier_seq++;
 904        /*
 905         * The above sequence increase must be visible before the
 906         * below count decrease but both values are read by the kvm
  907         * page fault under the mmu_lock spinlock, so we don't need to
  908         * add an smp_wmb() here in between the two.
 909         */
 910        kvm->mmu_notifier_count--;
 911        spin_unlock(&kvm->mmu_lock);
 912
 913        BUG_ON(kvm->mmu_notifier_count < 0);
 914}
 915
 916static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 917                                              struct mm_struct *mm,
 918                                              unsigned long address)
 919{
 920        struct kvm *kvm = mmu_notifier_to_kvm(mn);
 921        int young;
 922
 923        spin_lock(&kvm->mmu_lock);
 924        young = kvm_age_hva(kvm, address);
 925        spin_unlock(&kvm->mmu_lock);
 926
 927        if (young)
 928                kvm_flush_remote_tlbs(kvm);
 929
 930        return young;
 931}
 932
 933static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 934                                     struct mm_struct *mm)
 935{
 936        struct kvm *kvm = mmu_notifier_to_kvm(mn);
 937        kvm_arch_flush_shadow(kvm);
 938}
 939
 940static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
 941        .invalidate_page        = kvm_mmu_notifier_invalidate_page,
 942        .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
 943        .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
 944        .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
 945        .change_pte             = kvm_mmu_notifier_change_pte,
 946        .release                = kvm_mmu_notifier_release,
 947};
 948#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
 949
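/*
 * Allocate and initialize a new VM: arch state, the optional coalesced
 * MMIO ring page, the mmu notifier, the I/O buses and locks, and link
 * it onto the global vm_list.
 */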
 950static struct kvm *kvm_create_vm(void)
 951{
 952        struct kvm *kvm = kvm_arch_create_vm();
 953#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 954        struct page *page;
 955#endif
 956
 957        if (IS_ERR(kvm))
 958                goto out;
 959#ifdef CONFIG_HAVE_KVM_IRQCHIP
 960        INIT_LIST_HEAD(&kvm->irq_routing);
 961        INIT_HLIST_HEAD(&kvm->mask_notifier_list);
 962#endif
 963
 964#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 965        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 966        if (!page) {
 967                kfree(kvm);
 968                return ERR_PTR(-ENOMEM);
 969        }
 970        kvm->coalesced_mmio_ring =
 971                        (struct kvm_coalesced_mmio_ring *)page_address(page);
 972#endif
 973
 974#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 975        {
 976                int err;
 977                kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
 978                err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
 979                if (err) {
 980#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 981                        put_page(page);
 982#endif
 983                        kfree(kvm);
 984                        return ERR_PTR(err);
 985                }
 986        }
 987#endif
 988
 989        kvm->mm = current->mm;
 990        atomic_inc(&kvm->mm->mm_count);
 991        spin_lock_init(&kvm->mmu_lock);
 992        spin_lock_init(&kvm->requests_lock);
 993        kvm_io_bus_init(&kvm->pio_bus);
 994        kvm_eventfd_init(kvm);
 995        mutex_init(&kvm->lock);
 996        mutex_init(&kvm->irq_lock);
 997        kvm_io_bus_init(&kvm->mmio_bus);
 998        init_rwsem(&kvm->slots_lock);
 999        atomic_set(&kvm->users_count, 1);
1000        spin_lock(&kvm_lock);
1001        list_add(&kvm->vm_list, &vm_list);
1002        spin_unlock(&kvm_lock);
1003#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1004        kvm_coalesced_mmio_init(kvm);
1005#endif
1006out:
1007        return kvm;
1008}
1009
1010/*
1011 * Free any memory in @free but not in @dont.
1012 */
1013static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
1014                                  struct kvm_memory_slot *dont)
1015{
1016        int i;
1017
1018        if (!dont || free->rmap != dont->rmap)
1019                vfree(free->rmap);
1020
1021        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
1022                vfree(free->dirty_bitmap);
1023
1024
1025        for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
1026                if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
1027                        vfree(free->lpage_info[i]);
1028                        free->lpage_info[i] = NULL;
1029                }
1030        }
1031
1032        free->npages = 0;
1033        free->dirty_bitmap = NULL;
1034        free->rmap = NULL;
1035}
1036
1037void kvm_free_physmem(struct kvm *kvm)
1038{
1039        int i;
1040
1041        for (i = 0; i < kvm->nmemslots; ++i)
1042                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
1043}
1044
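/*
 * Tear down a VM once its last reference is dropped: unlink it from
 * vm_list, destroy the I/O buses, release the coalesced MMIO ring,
 * unregister the mmu notifier (or flush the shadow MMU), and finally
 * let the architecture code and the mm reference go.
 */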
1045static void kvm_destroy_vm(struct kvm *kvm)
1046{
1047        struct mm_struct *mm = kvm->mm;
1048
1049        kvm_arch_sync_events(kvm);
1050        spin_lock(&kvm_lock);
1051        list_del(&kvm->vm_list);
1052        spin_unlock(&kvm_lock);
1053        kvm_free_irq_routing(kvm);
1054        kvm_io_bus_destroy(&kvm->pio_bus);
1055        kvm_io_bus_destroy(&kvm->mmio_bus);
1056#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1057        if (kvm->coalesced_mmio_ring != NULL)
1058                free_page((unsigned long)kvm->coalesced_mmio_ring);
1059#endif
1060#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1061        mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1062#else
1063        kvm_arch_flush_shadow(kvm);
1064#endif
1065        kvm_arch_destroy_vm(kvm);
1066        mmdrop(mm);
1067}
1068
1069void kvm_get_kvm(struct kvm *kvm)
1070{
1071        atomic_inc(&kvm->users_count);
1072}
1073EXPORT_SYMBOL_GPL(kvm_get_kvm);
1074
1075void kvm_put_kvm(struct kvm *kvm)
1076{
1077        if (atomic_dec_and_test(&kvm->users_count))
1078                kvm_destroy_vm(kvm);
1079}
1080EXPORT_SYMBOL_GPL(kvm_put_kvm);
1081
1082
1083static int kvm_vm_release(struct inode *inode, struct file *filp)
1084{
1085        struct kvm *kvm = filp->private_data;
1086
1087        kvm_irqfd_release(kvm);
1088
1089        kvm_put_kvm(kvm);
1090        return 0;
1091}
1092
1093/*
1094 * Allocate some memory and give it an address in the guest physical address
1095 * space.
1096 *
1097 * Discontiguous memory is allowed, mostly for framebuffers.
1098 *
 1099 * Must be called holding kvm->slots_lock for write.
1100 */
1101int __kvm_set_memory_region(struct kvm *kvm,
1102                            struct kvm_userspace_memory_region *mem,
1103                            int user_alloc)
1104{
1105        int r;
1106        gfn_t base_gfn;
1107        unsigned long npages;
1108        unsigned long i;
1109        struct kvm_memory_slot *memslot;
1110        struct kvm_memory_slot old, new;
1111
1112        r = -EINVAL;
1113        /* General sanity checks */
1114        if (mem->memory_size & (PAGE_SIZE - 1))
1115                goto out;
1116        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
1117                goto out;
1118        if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
1119                goto out;
1120        if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
1121                goto out;
1122        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
1123                goto out;
1124
1125        memslot = &kvm->memslots[mem->slot];
1126        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
1127        npages = mem->memory_size >> PAGE_SHIFT;
1128
1129        if (!npages)
1130                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
1131
1132        new = old = *memslot;
1133
1134        new.base_gfn = base_gfn;
1135        new.npages = npages;
1136        new.flags = mem->flags;
1137
1138        /* Disallow changing a memory slot's size. */
1139        r = -EINVAL;
1140        if (npages && old.npages && npages != old.npages)
1141                goto out_free;
1142
1143        /* Check for overlaps */
1144        r = -EEXIST;
1145        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
1146                struct kvm_memory_slot *s = &kvm->memslots[i];
1147
1148                if (s == memslot || !s->npages)
1149                        continue;
1150                if (!((base_gfn + npages <= s->base_gfn) ||
1151                      (base_gfn >= s->base_gfn + s->npages)))
1152                        goto out_free;
1153        }
1154
1155        /* Free page dirty bitmap if unneeded */
1156        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
1157                new.dirty_bitmap = NULL;
1158
1159        r = -ENOMEM;
1160
1161        /* Allocate if a slot is being created */
1162#ifndef CONFIG_S390
1163        if (npages && !new.rmap) {
1164                new.rmap = vmalloc(npages * sizeof(struct page *));
1165
1166                if (!new.rmap)
1167                        goto out_free;
1168
1169                memset(new.rmap, 0, npages * sizeof(*new.rmap));
1170
1171                new.user_alloc = user_alloc;
1172                /*
 1173                 * hva_to_rmmap() serializes with the mmu_lock and to be
1174                 * safe it has to ignore memslots with !user_alloc &&
1175                 * !userspace_addr.
1176                 */
1177                if (user_alloc)
1178                        new.userspace_addr = mem->userspace_addr;
1179                else
1180                        new.userspace_addr = 0;
1181        }
1182        if (!npages)
1183                goto skip_lpage;
1184
1185        for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
1186                unsigned long ugfn;
1187                unsigned long j;
1188                int lpages;
1189                int level = i + 2;
1190
1191                /* Avoid unused variable warning if no large pages */
1192                (void)level;
1193
1194                if (new.lpage_info[i])
1195                        continue;
1196
1197                lpages = 1 + (base_gfn + npages - 1) /
1198                             KVM_PAGES_PER_HPAGE(level);
1199                lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);
1200
1201                new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
1202
1203                if (!new.lpage_info[i])
1204                        goto out_free;
1205
1206                memset(new.lpage_info[i], 0,
1207                       lpages * sizeof(*new.lpage_info[i]));
1208
1209                if (base_gfn % KVM_PAGES_PER_HPAGE(level))
1210                        new.lpage_info[i][0].write_count = 1;
1211                if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
1212                        new.lpage_info[i][lpages - 1].write_count = 1;
1213                ugfn = new.userspace_addr >> PAGE_SHIFT;
1214                /*
1215                 * If the gfn and userspace address are not aligned wrt each
1216                 * other, or if explicitly asked to, disable large page
1217                 * support for this slot
1218                 */
1219                if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
1220                    !largepages_enabled)
1221                        for (j = 0; j < lpages; ++j)
1222                                new.lpage_info[i][j].write_count = 1;
1223        }
1224
1225skip_lpage:
1226
1227        /* Allocate page dirty bitmap if needed */
1228        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
1229                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
1230
1231                new.dirty_bitmap = vmalloc(dirty_bytes);
1232                if (!new.dirty_bitmap)
1233                        goto out_free;
1234                memset(new.dirty_bitmap, 0, dirty_bytes);
1235                if (old.npages)
1236                        kvm_arch_flush_shadow(kvm);
1237        }
1238#else  /* not defined CONFIG_S390 */
1239        new.user_alloc = user_alloc;
1240        if (user_alloc)
1241                new.userspace_addr = mem->userspace_addr;
1242#endif /* not defined CONFIG_S390 */
1243
1244        if (!npages)
1245                kvm_arch_flush_shadow(kvm);
1246
1247        spin_lock(&kvm->mmu_lock);
1248        if (mem->slot >= kvm->nmemslots)
1249                kvm->nmemslots = mem->slot + 1;
1250
1251        *memslot = new;
1252        spin_unlock(&kvm->mmu_lock);
1253
1254        r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
1255        if (r) {
1256                spin_lock(&kvm->mmu_lock);
1257                *memslot = old;
1258                spin_unlock(&kvm->mmu_lock);
1259                goto out_free;
1260        }
1261
1262        kvm_free_physmem_slot(&old, npages ? &new : NULL);
1263        /* Slot deletion case: we have to update the current slot */
1264        spin_lock(&kvm->mmu_lock);
1265        if (!npages)
1266                *memslot = old;
1267        spin_unlock(&kvm->mmu_lock);
1268#ifdef CONFIG_DMAR
1269        /* map the pages in iommu page table */
1270        r = kvm_iommu_map_pages(kvm, base_gfn, npages);
1271        if (r)
1272                goto out;
1273#endif
1274        return 0;
1275
1276out_free:
1277        kvm_free_physmem_slot(&new, &old);
1278out:
1279        return r;
1280
1281}
1282EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
1283
1284int kvm_set_memory_region(struct kvm *kvm,
1285                          struct kvm_userspace_memory_region *mem,
1286                          int user_alloc)
1287{
1288        int r;
1289
1290        down_write(&kvm->slots_lock);
1291        r = __kvm_set_memory_region(kvm, mem, user_alloc);
1292        up_write(&kvm->slots_lock);
1293        return r;
1294}
1295EXPORT_SYMBOL_GPL(kvm_set_memory_region);
1296
1297int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
1298                                   struct
1299                                   kvm_userspace_memory_region *mem,
1300                                   int user_alloc)
1301{
1302        if (mem->slot >= KVM_MEMORY_SLOTS)
1303                return -EINVAL;
1304        return kvm_set_memory_region(kvm, mem, user_alloc);
1305}
1306
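/*
 * Copy the dirty bitmap of the slot named in @log to userspace and
 * report through @is_dirty whether any bit was set.  The bitmap itself
 * is not cleared here.
 */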
1307int kvm_get_dirty_log(struct kvm *kvm,
1308                        struct kvm_dirty_log *log, int *is_dirty)
1309{
1310        struct kvm_memory_slot *memslot;
1311        int r, i;
1312        int n;
1313        unsigned long any = 0;
1314
1315        r = -EINVAL;
1316        if (log->slot >= KVM_MEMORY_SLOTS)
1317                goto out;
1318
1319        memslot = &kvm->memslots[log->slot];
1320        r = -ENOENT;
1321        if (!memslot->dirty_bitmap)
1322                goto out;
1323
1324        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1325
1326        for (i = 0; !any && i < n/sizeof(long); ++i)
1327                any = memslot->dirty_bitmap[i];
1328
1329        r = -EFAULT;
1330        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
1331                goto out;
1332
1333        if (any)
1334                *is_dirty = 1;
1335
1336        r = 0;
1337out:
1338        return r;
1339}
1340
1341void kvm_disable_largepages(void)
1342{
1343        largepages_enabled = false;
1344}
1345EXPORT_SYMBOL_GPL(kvm_disable_largepages);
1346
1347int is_error_page(struct page *page)
1348{
1349        return page == bad_page;
1350}
1351EXPORT_SYMBOL_GPL(is_error_page);
1352
1353int is_error_pfn(pfn_t pfn)
1354{
1355        return pfn == bad_pfn;
1356}
1357EXPORT_SYMBOL_GPL(is_error_pfn);
1358
1359static inline unsigned long bad_hva(void)
1360{
1361        return PAGE_OFFSET;
1362}
1363
1364int kvm_is_error_hva(unsigned long addr)
1365{
1366        return addr == bad_hva();
1367}
1368EXPORT_SYMBOL_GPL(kvm_is_error_hva);
1369
1370struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
1371{
1372        int i;
1373
1374        for (i = 0; i < kvm->nmemslots; ++i) {
1375                struct kvm_memory_slot *memslot = &kvm->memslots[i];
1376
1377                if (gfn >= memslot->base_gfn
1378                    && gfn < memslot->base_gfn + memslot->npages)
1379                        return memslot;
1380        }
1381        return NULL;
1382}
1383EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
1384
1385struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
1386{
1387        gfn = unalias_gfn(kvm, gfn);
1388        return gfn_to_memslot_unaliased(kvm, gfn);
1389}
1390
1391int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
1392{
1393        int i;
1394
1395        gfn = unalias_gfn(kvm, gfn);
1396        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
1397                struct kvm_memory_slot *memslot = &kvm->memslots[i];
1398
1399                if (gfn >= memslot->base_gfn
1400                    && gfn < memslot->base_gfn + memslot->npages)
1401                        return 1;
1402        }
1403        return 0;
1404}
1405EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
1406
1407unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
1408{
1409        struct kvm_memory_slot *slot;
1410
1411        gfn = unalias_gfn(kvm, gfn);
1412        slot = gfn_to_memslot_unaliased(kvm, gfn);
1413        if (!slot)
1414                return bad_hva();
1415        return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
1416}
1417EXPORT_SYMBOL_GPL(gfn_to_hva);
1418
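/*
 * Translate a guest frame number to a host pfn: resolve the gfn to a
 * userspace address, then pin the page with get_user_pages_fast().  If
 * that fails, fall back to a VM_PFNMAP vma lookup and compute the raw
 * pfn; on any error the pfn of bad_page is returned.
 */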
1419pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
1420{
1421        struct page *page[1];
1422        unsigned long addr;
1423        int npages;
1424        pfn_t pfn;
1425
1426        might_sleep();
1427
1428        addr = gfn_to_hva(kvm, gfn);
1429        if (kvm_is_error_hva(addr)) {
1430                get_page(bad_page);
1431                return page_to_pfn(bad_page);
1432        }
1433
1434        npages = get_user_pages_fast(addr, 1, 1, page);
1435
1436        if (unlikely(npages != 1)) {
1437                struct vm_area_struct *vma;
1438
1439                down_read(&current->mm->mmap_sem);
1440                vma = find_vma(current->mm, addr);
1441
1442                if (vma == NULL || addr < vma->vm_start ||
1443                    !(vma->vm_flags & VM_PFNMAP)) {
1444                        up_read(&current->mm->mmap_sem);
1445                        get_page(bad_page);
1446                        return page_to_pfn(bad_page);
1447                }
1448
1449                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1450                up_read(&current->mm->mmap_sem);
1451                BUG_ON(!kvm_is_mmio_pfn(pfn));
1452        } else
1453                pfn = page_to_pfn(page[0]);
1454
1455        return pfn;
1456}
1457
1458EXPORT_SYMBOL_GPL(gfn_to_pfn);
1459
1460struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1461{
1462        pfn_t pfn;
1463
1464        pfn = gfn_to_pfn(kvm, gfn);
1465        if (!kvm_is_mmio_pfn(pfn))
1466                return pfn_to_page(pfn);
1467
1468        WARN_ON(kvm_is_mmio_pfn(pfn));
1469
1470        get_page(bad_page);
1471        return bad_page;
1472}
1473
1474EXPORT_SYMBOL_GPL(gfn_to_page);
1475
1476void kvm_release_page_clean(struct page *page)
1477{
1478        kvm_release_pfn_clean(page_to_pfn(page));
1479}
1480EXPORT_SYMBOL_GPL(kvm_release_page_clean);
1481
1482void kvm_release_pfn_clean(pfn_t pfn)
1483{
1484        if (!kvm_is_mmio_pfn(pfn))
1485                put_page(pfn_to_page(pfn));
1486}
1487EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
1488
1489void kvm_release_page_dirty(struct page *page)
1490{
1491        kvm_release_pfn_dirty(page_to_pfn(page));
1492}
1493EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
1494
1495void kvm_release_pfn_dirty(pfn_t pfn)
1496{
1497        kvm_set_pfn_dirty(pfn);
1498        kvm_release_pfn_clean(pfn);
1499}
1500EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
1501
1502void kvm_set_page_dirty(struct page *page)
1503{
1504        kvm_set_pfn_dirty(page_to_pfn(page));
1505}
1506EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
1507
1508void kvm_set_pfn_dirty(pfn_t pfn)
1509{
1510        if (!kvm_is_mmio_pfn(pfn)) {
1511                struct page *page = pfn_to_page(pfn);
1512                if (!PageReserved(page))
1513                        SetPageDirty(page);
1514        }
1515}
1516EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
1517
1518void kvm_set_pfn_accessed(pfn_t pfn)
1519{
1520        if (!kvm_is_mmio_pfn(pfn))
1521                mark_page_accessed(pfn_to_page(pfn));
1522}
1523EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
1524
1525void kvm_get_pfn(pfn_t pfn)
1526{
1527        if (!kvm_is_mmio_pfn(pfn))
1528                get_page(pfn_to_page(pfn));
1529}
1530EXPORT_SYMBOL_GPL(kvm_get_pfn);
1531
1532static int next_segment(unsigned long len, int offset)
1533{
1534        if (len > PAGE_SIZE - offset)
1535                return PAGE_SIZE - offset;
1536        else
1537                return len;
1538}
1539
1540int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1541                        int len)
1542{
1543        int r;
1544        unsigned long addr;
1545
1546        addr = gfn_to_hva(kvm, gfn);
1547        if (kvm_is_error_hva(addr))
1548                return -EFAULT;
1549        r = copy_from_user(data, (void __user *)addr + offset, len);
1550        if (r)
1551                return -EFAULT;
1552        return 0;
1553}
1554EXPORT_SYMBOL_GPL(kvm_read_guest_page);
1555
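/*
 * Read @len bytes starting at guest physical address @gpa, splitting
 * the copy at page boundaries via next_segment() and using
 * kvm_read_guest_page() for each piece.
 */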
1556int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
1557{
1558        gfn_t gfn = gpa >> PAGE_SHIFT;
1559        int seg;
1560        int offset = offset_in_page(gpa);
1561        int ret;
1562
1563        while ((seg = next_segment(len, offset)) != 0) {
1564                ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
1565                if (ret < 0)
1566                        return ret;
1567                offset = 0;
1568                len -= seg;
1569                data += seg;
1570                ++gfn;
1571        }
1572        return 0;
1573}
1574EXPORT_SYMBOL_GPL(kvm_read_guest);
1575
1576int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
1577                          unsigned long len)
1578{
1579        int r;
1580        unsigned long addr;
1581        gfn_t gfn = gpa >> PAGE_SHIFT;
1582        int offset = offset_in_page(gpa);
1583
1584        addr = gfn_to_hva(kvm, gfn);
1585        if (kvm_is_error_hva(addr))
1586                return -EFAULT;
1587        pagefault_disable();
1588        r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
1589        pagefault_enable();
1590        if (r)
1591                return -EFAULT;
1592        return 0;
1593}
1594EXPORT_SYMBOL(kvm_read_guest_atomic);
1595
1596int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1597                         int offset, int len)
1598{
1599        int r;
1600        unsigned long addr;
1601
1602        addr = gfn_to_hva(kvm, gfn);
1603        if (kvm_is_error_hva(addr))
1604                return -EFAULT;
1605        r = copy_to_user((void __user *)addr + offset, data, len);
1606        if (r)
1607                return -EFAULT;
1608        mark_page_dirty(kvm, gfn);
1609        return 0;
1610}
1611EXPORT_SYMBOL_GPL(kvm_write_guest_page);
1612
1613int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1614                    unsigned long len)
1615{
1616        gfn_t gfn = gpa >> PAGE_SHIFT;
1617        int seg;
1618        int offset = offset_in_page(gpa);
1619        int ret;
1620
1621        while ((seg = next_segment(len, offset)) != 0) {
1622                ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
1623                if (ret < 0)
1624                        return ret;
1625                offset = 0;
1626                len -= seg;
1627                data += seg;
1628                ++gfn;
1629        }
1630        return 0;
1631}
1632
1633int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
1634{
1635        return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
1636}
1637EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
1638
1639int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
1640{
1641        gfn_t gfn = gpa >> PAGE_SHIFT;
1642        int seg;
1643        int offset = offset_in_page(gpa);
1644        int ret;
1645
1646        while ((seg = next_segment(len, offset)) != 0) {
1647                ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
1648                if (ret < 0)
1649                        return ret;
1650                offset = 0;
1651                len -= seg;
1652                ++gfn;
1653        }
1654        return 0;
1655}
1656EXPORT_SYMBOL_GPL(kvm_clear_guest);
1657
1658void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
1659{
1660        struct kvm_memory_slot *memslot;
1661
1662        gfn = unalias_gfn(kvm, gfn);
1663        memslot = gfn_to_memslot_unaliased(kvm, gfn);
1664        if (memslot && memslot->dirty_bitmap) {
1665                unsigned long rel_gfn = gfn - memslot->base_gfn;
1666
1667                /* avoid RMW */
1668                if (!test_bit(rel_gfn, memslot->dirty_bitmap))
1669                        set_bit(rel_gfn, memslot->dirty_bitmap);
1670        }
1671}
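
/*
 * Editorial example, not part of kvm_main.c: the bits set above are what
 * user space later collects with KVM_GET_DIRTY_LOG (handled in
 * kvm_vm_ioctl() below).  A minimal user-space sketch, assuming 4 KiB guest
 * pages and a VM fd plus memory slot created elsewhere; vm_fd, slot and
 * mem_size are placeholders.
 */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void *fetch_dirty_bitmap(int vm_fd, unsigned int slot, size_t mem_size)
{
	/* one bit per page, rounded up to whole 64-bit words */
	size_t bitmap_bytes = ((mem_size / 4096 + 63) / 64) * 8;
	void *bitmap = calloc(1, bitmap_bytes);
	struct kvm_dirty_log log;

	if (!bitmap)
		return NULL;
	memset(&log, 0, sizeof(log));
	log.slot = slot;
	log.dirty_bitmap = bitmap;
	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
		free(bitmap);
		return NULL;
	}
	return bitmap;		/* caller frees; set bits mark dirtied pages */
}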
1672
1673/*
1674 * The vCPU has executed an HLT instruction with in-kernel mode enabled.
1675 */
1676void kvm_vcpu_block(struct kvm_vcpu *vcpu)
1677{
1678        DEFINE_WAIT(wait);
1679
1680        for (;;) {
1681                prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
1682
1683                if (kvm_arch_vcpu_runnable(vcpu)) {
1684                        set_bit(KVM_REQ_UNHALT, &vcpu->requests);
1685                        break;
1686                }
1687                if (kvm_cpu_has_pending_timer(vcpu))
1688                        break;
1689                if (signal_pending(current))
1690                        break;
1691
1692                vcpu_put(vcpu);
1693                schedule();
1694                vcpu_load(vcpu);
1695        }
1696
1697        finish_wait(&vcpu->wq, &wait);
1698}
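
/*
 * Editorial example, not part of kvm_main.c: kvm_vcpu_block() above follows
 * the standard prepare_to_wait()/finish_wait() sleep idiom.  A minimal
 * generic sketch of that idiom for a hypothetical driver (assuming
 * <linux/wait.h> and <linux/sched.h>); my_wq and my_cond are placeholders.
 * Re-checking the condition after prepare_to_wait() closes the race against
 * a wake-up that arrives just before the task sleeps.
 */
static void demo_wait_for_condition(wait_queue_head_t *my_wq, int *my_cond)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(my_wq, &wait, TASK_INTERRUPTIBLE);
		if (*my_cond || signal_pending(current))
			break;
		schedule();	/* sleep until wake_up(my_wq) or a signal */
	}
	finish_wait(my_wq, &wait);
}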
1699
1700void kvm_resched(struct kvm_vcpu *vcpu)
1701{
1702        if (!need_resched())
1703                return;
1704        cond_resched();
1705}
1706EXPORT_SYMBOL_GPL(kvm_resched);
1707
1708static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1709{
1710        struct kvm_vcpu *vcpu = vma->vm_file->private_data;
1711        struct page *page;
1712
1713        if (vmf->pgoff == 0)
1714                page = virt_to_page(vcpu->run);
1715#ifdef CONFIG_X86
1716        else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
1717                page = virt_to_page(vcpu->arch.pio_data);
1718#endif
1719#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1720        else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
1721                page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
1722#endif
1723        else
1724                return VM_FAULT_SIGBUS;
1725        get_page(page);
1726        vmf->page = page;
1727        return 0;
1728}
1729
1730static const struct vm_operations_struct kvm_vcpu_vm_ops = {
1731        .fault = kvm_vcpu_fault,
1732};
1733
1734static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
1735{
1736        vma->vm_ops = &kvm_vcpu_vm_ops;
1737        return 0;
1738}
1739
1740static int kvm_vcpu_release(struct inode *inode, struct file *filp)
1741{
1742        struct kvm_vcpu *vcpu = filp->private_data;
1743
1744        kvm_put_kvm(vcpu->kvm);
1745        return 0;
1746}
1747
1748static struct file_operations kvm_vcpu_fops = {
1749        .release        = kvm_vcpu_release,
1750        .unlocked_ioctl = kvm_vcpu_ioctl,
1751        .compat_ioctl   = kvm_vcpu_ioctl,
1752        .mmap           = kvm_vcpu_mmap,
1753};
1754
1755/*
1756 * Allocates an inode for the vcpu.
1757 */
1758static int create_vcpu_fd(struct kvm_vcpu *vcpu)
1759{
1760        return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
1761}
1762
1763/*
1764 * Creates a virtual cpu with the given id and exposes it to userspace via a new fd.
1765 */
1766static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
1767{
1768        int r;
1769        struct kvm_vcpu *vcpu, *v;
1770
1771        vcpu = kvm_arch_vcpu_create(kvm, id);
1772        if (IS_ERR(vcpu))
1773                return PTR_ERR(vcpu);
1774
1775        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
1776
1777        r = kvm_arch_vcpu_setup(vcpu);
1778        if (r)
1779                return r;
1780
1781        mutex_lock(&kvm->lock);
1782        if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
1783                r = -EINVAL;
1784                goto vcpu_destroy;
1785        }
1786
1787        kvm_for_each_vcpu(r, v, kvm)
1788                if (v->vcpu_id == id) {
1789                        r = -EEXIST;
1790                        goto vcpu_destroy;
1791                }
1792
1793        BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
1794
1795        /* Now it's all set up, let userspace reach it */
1796        kvm_get_kvm(kvm);
1797        r = create_vcpu_fd(vcpu);
1798        if (r < 0) {
1799                kvm_put_kvm(kvm);
1800                goto vcpu_destroy;
1801        }
1802
1803        kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
1804        smp_wmb();
1805        atomic_inc(&kvm->online_vcpus);
1806
1807#ifdef CONFIG_KVM_APIC_ARCHITECTURE
1808        if (kvm->bsp_vcpu_id == id)
1809                kvm->bsp_vcpu = vcpu;
1810#endif
1811        mutex_unlock(&kvm->lock);
1812        return r;
1813
1814vcpu_destroy:
1815        mutex_unlock(&kvm->lock);
1816        kvm_arch_vcpu_destroy(vcpu);
1817        return r;
1818}
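
/*
 * Editorial example, not part of kvm_main.c: the user-space side of the
 * ioctl above.  A minimal sketch, assuming vm_fd came from KVM_CREATE_VM;
 * on success the return value is a new file descriptor for the vcpu,
 * backed by kvm_vcpu_fops.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int demo_create_vcpu(int vm_fd, unsigned long vcpu_id)
{
	/* returns the vcpu fd, or -1 with errno set (e.g. EEXIST for a duplicate id) */
	return ioctl(vm_fd, KVM_CREATE_VCPU, vcpu_id);
}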
1819
1820static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
1821{
1822        if (sigset) {
1823                sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
1824                vcpu->sigset_active = 1;
1825                vcpu->sigset = *sigset;
1826        } else
1827                vcpu->sigset_active = 0;
1828        return 0;
1829}
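
/*
 * Editorial example, not part of kvm_main.c: setting a vcpu signal mask
 * from user space.  A minimal sketch for x86, where the kernel's sigset_t
 * is 8 bytes (the length checked in the KVM_SET_SIGNAL_MASK case below);
 * vcpu_fd is assumed to come from KVM_CREATE_VCPU.  Passing a NULL argument
 * instead clears the mask, as handled above.
 */
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int demo_set_vcpu_sigmask(int vcpu_fd, const sigset_t *mask)
{
	struct kvm_signal_mask *kmask;
	int r;

	if (!mask)
		return ioctl(vcpu_fd, KVM_SET_SIGNAL_MASK, NULL);

	kmask = malloc(sizeof(*kmask) + 8);
	if (!kmask)
		return -1;
	kmask->len = 8;			/* size of the kernel sigset_t */
	memcpy(kmask->sigset, mask, 8);	/* glibc's sigset_t is larger; copy the low 64 signals */
	r = ioctl(vcpu_fd, KVM_SET_SIGNAL_MASK, kmask);
	free(kmask);
	return r;
}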
1830
1831#ifdef __KVM_HAVE_MSIX
1832static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
1833                                    struct kvm_assigned_msix_nr *entry_nr)
1834{
1835        int r = 0;
1836        struct kvm_assigned_dev_kernel *adev;
1837
1838        mutex_lock(&kvm->lock);
1839
1840        adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
1841                                      entry_nr->assigned_dev_id);
1842        if (!adev) {
1843                r = -EINVAL;
1844                goto msix_nr_out;
1845        }
1846
1847        if (adev->entries_nr == 0) {
1848                adev->entries_nr = entry_nr->entry_nr;
1849                if (adev->entries_nr == 0 ||
1850                    adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
1851                        r = -EINVAL;
1852                        goto msix_nr_out;
1853                }
1854
1855                adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
1856                                                entry_nr->entry_nr,
1857                                                GFP_KERNEL);
1858                if (!adev->host_msix_entries) {
1859                        r = -ENOMEM;
1860                        goto msix_nr_out;
1861                }
1862                adev->guest_msix_entries = kzalloc(
1863                                sizeof(struct kvm_guest_msix_entry) *
1864                                entry_nr->entry_nr, GFP_KERNEL);
1865                if (!adev->guest_msix_entries) {
1866                        kfree(adev->host_msix_entries);
1867                        r = -ENOMEM;
1868                        goto msix_nr_out;
1869                }
1870        } else /* setting the MSI-X entry count twice is not allowed */
1871                r = -EINVAL;
1872msix_nr_out:
1873        mutex_unlock(&kvm->lock);
1874        return r;
1875}
1876
1877static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
1878                                       struct kvm_assigned_msix_entry *entry)
1879{
1880        int r = 0, i;
1881        struct kvm_assigned_dev_kernel *adev;
1882
1883        mutex_lock(&kvm->lock);
1884
1885        adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
1886                                      entry->assigned_dev_id);
1887
1888        if (!adev) {
1889                r = -EINVAL;
1890                goto msix_entry_out;
1891        }
1892
1893        for (i = 0; i < adev->entries_nr; i++)
1894                if (adev->guest_msix_entries[i].vector == 0 ||
1895                    adev->guest_msix_entries[i].entry == entry->entry) {
1896                        adev->guest_msix_entries[i].entry = entry->entry;
1897                        adev->guest_msix_entries[i].vector = entry->gsi;
1898                        adev->host_msix_entries[i].entry = entry->entry;
1899                        break;
1900                }
1901        if (i == adev->entries_nr) {
1902                r = -ENOSPC;
1903                goto msix_entry_out;
1904        }
1905
1906msix_entry_out:
1907        mutex_unlock(&kvm->lock);
1908
1909        return r;
1910}
1911#endif
1912
1913static long kvm_vcpu_ioctl(struct file *filp,
1914                           unsigned int ioctl, unsigned long arg)
1915{
1916        struct kvm_vcpu *vcpu = filp->private_data;
1917        void __user *argp = (void __user *)arg;
1918        int r;
1919        struct kvm_fpu *fpu = NULL;
1920        struct kvm_sregs *kvm_sregs = NULL;
1921
1922        if (vcpu->kvm->mm != current->mm)
1923                return -EIO;
1924        switch (ioctl) {
1925        case KVM_RUN:
1926                r = -EINVAL;
1927                if (arg)
1928                        goto out;
1929                r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
1930                break;
1931        case KVM_GET_REGS: {
1932                struct kvm_regs *kvm_regs;
1933
1934                r = -ENOMEM;
1935                kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1936                if (!kvm_regs)
1937                        goto out;
1938                r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
1939                if (r)
1940                        goto out_free1;
1941                r = -EFAULT;
1942                if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
1943                        goto out_free1;
1944                r = 0;
1945out_free1:
1946                kfree(kvm_regs);
1947                break;
1948        }
1949        case KVM_SET_REGS: {
1950                struct kvm_regs *kvm_regs;
1951
1952                r = -ENOMEM;
1953                kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1954                if (!kvm_regs)
1955                        goto out;
1956                r = -EFAULT;
1957                if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
1958                        goto out_free2;
1959                r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
1960                if (r)
1961                        goto out_free2;
1962                r = 0;
1963out_free2:
1964                kfree(kvm_regs);
1965                break;
1966        }
1967        case KVM_GET_SREGS: {
1968                kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1969                r = -ENOMEM;
1970                if (!kvm_sregs)
1971                        goto out;
1972                r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
1973                if (r)
1974                        goto out;
1975                r = -EFAULT;
1976                if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
1977                        goto out;
1978                r = 0;
1979                break;
1980        }
1981        case KVM_SET_SREGS: {
1982                kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1983                r = -ENOMEM;
1984                if (!kvm_sregs)
1985                        goto out;
1986                r = -EFAULT;
1987                if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
1988                        goto out;
1989                r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
1990                if (r)
1991                        goto out;
1992                r = 0;
1993                break;
1994        }
1995        case KVM_GET_MP_STATE: {
1996                struct kvm_mp_state mp_state;
1997
1998                r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
1999                if (r)
2000                        goto out;
2001                r = -EFAULT;
2002                if (copy_to_user(argp, &mp_state, sizeof mp_state))
2003                        goto out;
2004                r = 0;
2005                break;
2006        }
2007        case KVM_SET_MP_STATE: {
2008                struct kvm_mp_state mp_state;
2009
2010                r = -EFAULT;
2011                if (copy_from_user(&mp_state, argp, sizeof mp_state))
2012                        goto out;
2013                r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
2014                if (r)
2015                        goto out;
2016                r = 0;
2017                break;
2018        }
2019        case KVM_TRANSLATE: {
2020                struct kvm_translation tr;
2021
2022                r = -EFAULT;
2023                if (copy_from_user(&tr, argp, sizeof tr))
2024                        goto out;
2025                r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
2026                if (r)
2027                        goto out;
2028                r = -EFAULT;
2029                if (copy_to_user(argp, &tr, sizeof tr))
2030                        goto out;
2031                r = 0;
2032                break;
2033        }
2034        case KVM_SET_GUEST_DEBUG: {
2035                struct kvm_guest_debug dbg;
2036
2037                r = -EFAULT;
2038                if (copy_from_user(&dbg, argp, sizeof dbg))
2039                        goto out;
2040                r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
2041                if (r)
2042                        goto out;
2043                r = 0;
2044                break;
2045        }
2046        case KVM_SET_SIGNAL_MASK: {
2047                struct kvm_signal_mask __user *sigmask_arg = argp;
2048                struct kvm_signal_mask kvm_sigmask;
2049                sigset_t sigset, *p;
2050
2051                p = NULL;
2052                if (argp) {
2053                        r = -EFAULT;
2054                        if (copy_from_user(&kvm_sigmask, argp,
2055                                           sizeof kvm_sigmask))
2056                                goto out;
2057                        r = -EINVAL;
2058                        if (kvm_sigmask.len != sizeof sigset)
2059                                goto out;
2060                        r = -EFAULT;
2061                        if (copy_from_user(&sigset, sigmask_arg->sigset,
2062                                           sizeof sigset))
2063                                goto out;
2064                        p = &sigset;
2065                }
2066                r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
2067                break;
2068        }
2069        case KVM_GET_FPU: {
2070                fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
2071                r = -ENOMEM;
2072                if (!fpu)
2073                        goto out;
2074                r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
2075                if (r)
2076                        goto out;
2077                r = -EFAULT;
2078                if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
2079                        goto out;
2080                r = 0;
2081                break;
2082        }
2083        case KVM_SET_FPU: {
2084                fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
2085                r = -ENOMEM;
2086                if (!fpu)
2087                        goto out;
2088                r = -EFAULT;
2089                if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
2090                        goto out;
2091                r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
2092                if (r)
2093                        goto out;
2094                r = 0;
2095                break;
2096        }
2097        default:
2098                r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
2099        }
2100out:
2101        kfree(fpu);
2102        kfree(kvm_sregs);
2103        return r;
2104}
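
/*
 * Editorial example, not part of kvm_main.c: the canonical user-space loop
 * driving the KVM_RUN case above.  A minimal sketch; kvm_fd and vcpu_fd are
 * assumed to exist and only two exit reasons are handled.  The kvm_run page
 * is obtained by mmap()ing the vcpu fd (see kvm_vcpu_fault() above), sized
 * by KVM_GET_VCPU_MMAP_SIZE.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int demo_run_vcpu(int kvm_fd, int vcpu_fd)
{
	int mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run;

	if (mmap_size < 0)
		return -1;
	/* page 0 of the vcpu fd is the kvm_run structure */
	run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   vcpu_fd, 0);
	if (run == MAP_FAILED)
		return -1;

	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;	/* includes errno == EINTR on a signal */
		switch (run->exit_reason) {
		case KVM_EXIT_MMIO:
			/* run->mmio describes the access; emulate it here */
			break;
		case KVM_EXIT_HLT:
			return 0;
		default:
			fprintf(stderr, "unhandled exit %u\n", run->exit_reason);
			return -1;
		}
	}
}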
2105
2106static long kvm_vm_ioctl(struct file *filp,
2107                           unsigned int ioctl, unsigned long arg)
2108{
2109        struct kvm *kvm = filp->private_data;
2110        void __user *argp = (void __user *)arg;
2111        int r;
2112
2113        if (kvm->mm != current->mm)
2114                return -EIO;
2115        switch (ioctl) {
2116        case KVM_CREATE_VCPU:
2117                r = kvm_vm_ioctl_create_vcpu(kvm, arg);
2118                if (r < 0)
2119                        goto out;
2120                break;
2121        case KVM_SET_USER_MEMORY_REGION: {
2122                struct kvm_userspace_memory_region kvm_userspace_mem;
2123
2124                r = -EFAULT;
2125                if (copy_from_user(&kvm_userspace_mem, argp,
2126                                                sizeof kvm_userspace_mem))
2127                        goto out;
2128
2129                r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
2130                if (r)
2131                        goto out;
2132                break;
2133        }
2134        case KVM_GET_DIRTY_LOG: {
2135                struct kvm_dirty_log log;
2136
2137                r = -EFAULT;
2138                if (copy_from_user(&log, argp, sizeof log))
2139                        goto out;
2140                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
2141                if (r)
2142                        goto out;
2143                break;
2144        }
2145#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2146        case KVM_REGISTER_COALESCED_MMIO: {
2147                struct kvm_coalesced_mmio_zone zone;
2148                r = -EFAULT;
2149                if (copy_from_user(&zone, argp, sizeof zone))
2150                        goto out;
2151                r = -ENXIO;
2152                r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
2153                if (r)
2154                        goto out;
2155                r = 0;
2156                break;
2157        }
2158        case KVM_UNREGISTER_COALESCED_MMIO: {
2159                struct kvm_coalesced_mmio_zone zone;
2160                r = -EFAULT;
2161                if (copy_from_user(&zone, argp, sizeof zone))
2162                        goto out;
2163                r = -ENXIO;
2164                r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
2165                if (r)
2166                        goto out;
2167                r = 0;
2168                break;
2169        }
2170#endif
2171#ifdef KVM_CAP_DEVICE_ASSIGNMENT
2172        case KVM_ASSIGN_PCI_DEVICE: {
2173                struct kvm_assigned_pci_dev assigned_dev;
2174
2175                r = -EFAULT;
2176                if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
2177                        goto out;
2178                r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
2179                if (r)
2180                        goto out;
2181                break;
2182        }
2183        case KVM_ASSIGN_IRQ: {
2184                r = -EOPNOTSUPP;
2185                break;
2186        }
2187#ifdef KVM_CAP_ASSIGN_DEV_IRQ
2188        case KVM_ASSIGN_DEV_IRQ: {
2189                struct kvm_assigned_irq assigned_irq;
2190
2191                r = -EFAULT;
2192                if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
2193                        goto out;
2194                r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
2195                if (r)
2196                        goto out;
2197                break;
2198        }
2199        case KVM_DEASSIGN_DEV_IRQ: {
2200                struct kvm_assigned_irq assigned_irq;
2201
2202                r = -EFAULT;
2203                if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
2204                        goto out;
2205                r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
2206                if (r)
2207                        goto out;
2208                break;
2209        }
2210#endif
2211#endif
2212#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
2213        case KVM_DEASSIGN_PCI_DEVICE: {
2214                struct kvm_assigned_pci_dev assigned_dev;
2215
2216                r = -EFAULT;
2217                if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
2218                        goto out;
2219                r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
2220                if (r)
2221                        goto out;
2222                break;
2223        }
2224#endif
2225#ifdef KVM_CAP_IRQ_ROUTING
2226        case KVM_SET_GSI_ROUTING: {
2227                struct kvm_irq_routing routing;
2228                struct kvm_irq_routing __user *urouting;
2229                struct kvm_irq_routing_entry *entries;
2230
2231                r = -EFAULT;
2232                if (copy_from_user(&routing, argp, sizeof(routing)))
2233                        goto out;
2234                r = -EINVAL;
2235                if (routing.nr >= KVM_MAX_IRQ_ROUTES)
2236                        goto out;
2237                if (routing.flags)
2238                        goto out;
2239                r = -ENOMEM;
2240                entries = vmalloc(routing.nr * sizeof(*entries));
2241                if (!entries)
2242                        goto out;
2243                r = -EFAULT;
2244                urouting = argp;
2245                if (copy_from_user(entries, urouting->entries,
2246                                   routing.nr * sizeof(*entries)))
2247                        goto out_free_irq_routing;
2248                r = kvm_set_irq_routing(kvm, entries, routing.nr,
2249                                        routing.flags);
2250        out_free_irq_routing:
2251                vfree(entries);
2252                break;
2253        }
2254#endif /* KVM_CAP_IRQ_ROUTING */
2255#ifdef __KVM_HAVE_MSIX
2256        case KVM_ASSIGN_SET_MSIX_NR: {
2257                struct kvm_assigned_msix_nr entry_nr;
2258                r = -EFAULT;
2259                if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
2260                        goto out;
2261                r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
2262                if (r)
2263                        goto out;
2264                break;
2265        }
2266        case KVM_ASSIGN_SET_MSIX_ENTRY: {
2267                struct kvm_assigned_msix_entry entry;
2268                r = -EFAULT;
2269                if (copy_from_user(&entry, argp, sizeof entry))
2270                        goto out;
2271                r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
2272                if (r)
2273                        goto out;
2274                break;
2275        }
2276#endif
2277        case KVM_IRQFD: {
2278                struct kvm_irqfd data;
2279
2280                r = -EFAULT;
2281                if (copy_from_user(&data, argp, sizeof data))
2282                        goto out;
2283                r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
2284                break;
2285        }
2286        case KVM_IOEVENTFD: {
2287                struct kvm_ioeventfd data;
2288
2289                r = -EFAULT;
2290                if (copy_from_user(&data, argp, sizeof data))
2291                        goto out;
2292                r = kvm_ioeventfd(kvm, &data);
2293                break;
2294        }
2295#ifdef CONFIG_KVM_APIC_ARCHITECTURE
2296        case KVM_SET_BOOT_CPU_ID:
2297                r = 0;
2298                mutex_lock(&kvm->lock);
2299                if (atomic_read(&kvm->online_vcpus) != 0)
2300                        r = -EBUSY;
2301                else
2302                        kvm->bsp_vcpu_id = arg;
2303                mutex_unlock(&kvm->lock);
2304                break;
2305#endif
2306        default:
2307                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
2308        }
2309out:
2310        return r;
2311}
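
/*
 * Editorial example, not part of kvm_main.c: registering guest RAM through
 * the KVM_SET_USER_MEMORY_REGION case above.  A minimal user-space sketch;
 * vm_fd is assumed, and slot 0 at guest physical address 0 is an arbitrary
 * choice.  size must be a multiple of the page size.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int demo_add_ram(int vm_fd, size_t size)
{
	struct kvm_userspace_memory_region region;
	void *ram = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (ram == MAP_FAILED)
		return -1;
	memset(&region, 0, sizeof(region));
	region.slot = 0;
	region.guest_phys_addr = 0;	/* where the guest sees this RAM */
	region.memory_size = size;
	region.userspace_addr = (unsigned long)ram;
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}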
2312
2313static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2314{
2315        struct page *page[1];
2316        unsigned long addr;
2317        int npages;
2318        gfn_t gfn = vmf->pgoff;
2319        struct kvm *kvm = vma->vm_file->private_data;
2320
2321        addr = gfn_to_hva(kvm, gfn);
2322        if (kvm_is_error_hva(addr))
2323                return VM_FAULT_SIGBUS;
2324
2325        npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
2326                                NULL);
2327        if (unlikely(npages != 1))
2328                return VM_FAULT_SIGBUS;
2329
2330        vmf->page = page[0];
2331        return 0;
2332}
2333
2334static const struct vm_operations_struct kvm_vm_vm_ops = {
2335        .fault = kvm_vm_fault,
2336};
2337
2338static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
2339{
2340        vma->vm_ops = &kvm_vm_vm_ops;
2341        return 0;
2342}
2343
2344static struct file_operations kvm_vm_fops = {
2345        .release        = kvm_vm_release,
2346        .unlocked_ioctl = kvm_vm_ioctl,
2347        .compat_ioctl   = kvm_vm_ioctl,
2348        .mmap           = kvm_vm_mmap,
2349};
2350
2351static int kvm_dev_ioctl_create_vm(void)
2352{
2353        int fd;
2354        struct kvm *kvm;
2355
2356        kvm = kvm_create_vm();
2357        if (IS_ERR(kvm))
2358                return PTR_ERR(kvm);
2359        fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
2360        if (fd < 0)
2361                kvm_put_kvm(kvm);
2362
2363        return fd;
2364}
2365
2366static long kvm_dev_ioctl_check_extension_generic(long arg)
2367{
2368        switch (arg) {
2369        case KVM_CAP_USER_MEMORY:
2370        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
2371        case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
2372#ifdef CONFIG_KVM_APIC_ARCHITECTURE
2373        case KVM_CAP_SET_BOOT_CPU_ID:
2374#endif
2375                return 1;
2376#ifdef CONFIG_HAVE_KVM_IRQCHIP
2377        case KVM_CAP_IRQ_ROUTING:
2378                return KVM_MAX_IRQ_ROUTES;
2379#endif
2380        default:
2381                break;
2382        }
2383        return kvm_dev_ioctl_check_extension(arg);
2384}
2385
2386static long kvm_dev_ioctl(struct file *filp,
2387                          unsigned int ioctl, unsigned long arg)
2388{
2389        long r = -EINVAL;
2390
2391        switch (ioctl) {
2392        case KVM_GET_API_VERSION:
2393                r = -EINVAL;
2394                if (arg)
2395                        goto out;
2396                r = KVM_API_VERSION;
2397                break;
2398        case KVM_CREATE_VM:
2399                r = -EINVAL;
2400                if (arg)
2401                        goto out;
2402                r = kvm_dev_ioctl_create_vm();
2403                break;
2404        case KVM_CHECK_EXTENSION:
2405                r = kvm_dev_ioctl_check_extension_generic(arg);
2406                break;
2407        case KVM_GET_VCPU_MMAP_SIZE:
2408                r = -EINVAL;
2409                if (arg)
2410                        goto out;
2411                r = PAGE_SIZE;     /* struct kvm_run */
2412#ifdef CONFIG_X86
2413                r += PAGE_SIZE;    /* pio data page */
2414#endif
2415#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2416                r += PAGE_SIZE;    /* coalesced mmio ring page */
2417#endif
2418                break;
2419        case KVM_TRACE_ENABLE:
2420        case KVM_TRACE_PAUSE:
2421        case KVM_TRACE_DISABLE:
2422                r = -EOPNOTSUPP;
2423                break;
2424        default:
2425                return kvm_arch_dev_ioctl(filp, ioctl, arg);
2426        }
2427out:
2428        return r;
2429}
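
/*
 * Editorial example, not part of kvm_main.c: the user-space handshake with
 * the /dev/kvm ioctls above -- check the API version, then create a VM.  A
 * minimal sketch with error handling reduced to returning -1.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int demo_open_vm(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);

	if (kvm_fd < 0)
		return -1;
	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
		return -1;	/* built against a different ABI */
	return ioctl(kvm_fd, KVM_CREATE_VM, 0);	/* returns the VM fd */
}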
2430
2431static struct file_operations kvm_chardev_ops = {
2432        .unlocked_ioctl = kvm_dev_ioctl,
2433        .compat_ioctl   = kvm_dev_ioctl,
2434};
2435
2436static struct miscdevice kvm_dev = {
2437        .minor = KVM_MINOR,
2438        .name  = "kvm",
2439        .fops  = &kvm_chardev_ops,
2440};
2441
2442static void hardware_enable(void *junk)
2443{
2444        int cpu = raw_smp_processor_id();
2445
2446        if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
2447                return;
2448        cpumask_set_cpu(cpu, cpus_hardware_enabled);
2449        kvm_arch_hardware_enable(NULL);
2450}
2451
2452static void hardware_disable(void *junk)
2453{
2454        int cpu = raw_smp_processor_id();
2455
2456        if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
2457                return;
2458        cpumask_clear_cpu(cpu, cpus_hardware_enabled);
2459        kvm_arch_hardware_disable(NULL);
2460}
2461
2462static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2463                           void *v)
2464{
2465        int cpu = (long)v;
2466
2467        val &= ~CPU_TASKS_FROZEN;
2468        switch (val) {
2469        case CPU_DYING:
2470                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2471                       cpu);
2472                hardware_disable(NULL);
2473                break;
2474        case CPU_UP_CANCELED:
2475                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2476                       cpu);
2477                smp_call_function_single(cpu, hardware_disable, NULL, 1);
2478                break;
2479        case CPU_ONLINE:
2480                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2481                       cpu);
2482                smp_call_function_single(cpu, hardware_enable, NULL, 1);
2483                break;
2484        }
2485        return NOTIFY_OK;
2486}
2487
2488
2489asmlinkage void kvm_handle_fault_on_reboot(void)
2490{
2491        if (kvm_rebooting)
2492                /* spin while reset goes on */
2493                while (true)
2494                        ;
2495        /* Fault while not rebooting.  We want the trace. */
2496        BUG();
2497}
2498EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
2499
2500static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
2501                      void *v)
2502{
2503        /*
2504         * Some (well, at least mine) BIOSes hang on reboot if
2505         * in vmx root mode.
2506         *
2507         * Intel TXT also requires VMX to be off on all CPUs when the system shuts down.
2508         */
2509        printk(KERN_INFO "kvm: exiting hardware virtualization\n");
2510        kvm_rebooting = true;
2511        on_each_cpu(hardware_disable, NULL, 1);
2512        return NOTIFY_OK;
2513}
2514
2515static struct notifier_block kvm_reboot_notifier = {
2516        .notifier_call = kvm_reboot,
2517        .priority = 0,
2518};
2519
2520void kvm_io_bus_init(struct kvm_io_bus *bus)
2521{
2522        memset(bus, 0, sizeof(*bus));
2523}
2524
2525void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2526{
2527        int i;
2528
2529        for (i = 0; i < bus->dev_count; i++) {
2530                struct kvm_io_device *pos = bus->devs[i];
2531
2532                kvm_iodevice_destructor(pos);
2533        }
2534}
2535
2536/* kvm_io_bus_write - called under kvm->slots_lock */
2537int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
2538                     int len, const void *val)
2539{
2540        int i;
2541        for (i = 0; i < bus->dev_count; i++)
2542                if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
2543                        return 0;
2544        return -EOPNOTSUPP;
2545}
2546
2547/* kvm_io_bus_read - called under kvm->slots_lock */
2548int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
2549{
2550        int i;
2551        for (i = 0; i < bus->dev_count; i++)
2552                if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
2553                        return 0;
2554        return -EOPNOTSUPP;
2555}
2556
2557int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
2558                             struct kvm_io_device *dev)
2559{
2560        int ret;
2561
2562        down_write(&kvm->slots_lock);
2563        ret = __kvm_io_bus_register_dev(bus, dev);
2564        up_write(&kvm->slots_lock);
2565
2566        return ret;
2567}
2568
2569/* An unlocked version. Caller must have write lock on slots_lock. */
2570int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
2571                              struct kvm_io_device *dev)
2572{
2573        if (bus->dev_count > NR_IOBUS_DEVS-1)
2574                return -ENOSPC;
2575
2576        bus->devs[bus->dev_count++] = dev;
2577
2578        return 0;
2579}
2580
2581void kvm_io_bus_unregister_dev(struct kvm *kvm,
2582                               struct kvm_io_bus *bus,
2583                               struct kvm_io_device *dev)
2584{
2585        down_write(&kvm->slots_lock);
2586        __kvm_io_bus_unregister_dev(bus, dev);
2587        up_write(&kvm->slots_lock);
2588}
2589
2590/* An unlocked version. Caller must have write lock on slots_lock. */
2591void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
2592                                 struct kvm_io_device *dev)
2593{
2594        int i;
2595
2596        for (i = 0; i < bus->dev_count; i++)
2597                if (bus->devs[i] == dev) {
2598                        bus->devs[i] = bus->devs[--bus->dev_count];
2599                        break;
2600                }
2601}
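
/*
 * Editorial example, not part of kvm_main.c: hooking a device into the MMIO
 * bus using the helpers above.  A hedged in-kernel sketch assuming the
 * kvm_io_device_ops layout from this tree's iodev.h; the demo_* names and
 * the address range are made up.  Returning 0 from a handler claims the
 * access, -EOPNOTSUPP lets the bus try the next device.
 */
#define DEMO_MMIO_BASE	0xd0000000ULL
#define DEMO_MMIO_SIZE	0x1000

static int demo_in_range(gpa_t addr, int len)
{
	return addr >= DEMO_MMIO_BASE &&
	       addr + len <= DEMO_MMIO_BASE + DEMO_MMIO_SIZE;
}

static int demo_mmio_read(struct kvm_io_device *this, gpa_t addr,
			  int len, void *val)
{
	if (!demo_in_range(addr, len))
		return -EOPNOTSUPP;
	memset(val, 0, len);		/* reads return zero */
	return 0;
}

static int demo_mmio_write(struct kvm_io_device *this, gpa_t addr,
			   int len, const void *val)
{
	if (!demo_in_range(addr, len))
		return -EOPNOTSUPP;
	return 0;			/* writes are ignored */
}

static const struct kvm_io_device_ops demo_mmio_ops = {
	.read  = demo_mmio_read,
	.write = demo_mmio_write,
};

static struct kvm_io_device demo_mmio_dev;

static int demo_attach_mmio(struct kvm *kvm)
{
	kvm_iodevice_init(&demo_mmio_dev, &demo_mmio_ops);
	return kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &demo_mmio_dev);
}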
2602
2603static struct notifier_block kvm_cpu_notifier = {
2604        .notifier_call = kvm_cpu_hotplug,
2605        .priority = 20, /* must be > scheduler priority */
2606};
2607
2608static int vm_stat_get(void *_offset, u64 *val)
2609{
2610        unsigned offset = (long)_offset;
2611        struct kvm *kvm;
2612
2613        *val = 0;
2614        spin_lock(&kvm_lock);
2615        list_for_each_entry(kvm, &vm_list, vm_list)
2616                *val += *(u32 *)((void *)kvm + offset);
2617        spin_unlock(&kvm_lock);
2618        return 0;
2619}
2620
2621DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
2622
2623static int vcpu_stat_get(void *_offset, u64 *val)
2624{
2625        unsigned offset = (long)_offset;
2626        struct kvm *kvm;
2627        struct kvm_vcpu *vcpu;
2628        int i;
2629
2630        *val = 0;
2631        spin_lock(&kvm_lock);
2632        list_for_each_entry(kvm, &vm_list, vm_list)
2633                kvm_for_each_vcpu(i, vcpu, kvm)
2634                        *val += *(u32 *)((void *)vcpu + offset);
2635
2636        spin_unlock(&kvm_lock);
2637        return 0;
2638}
2639
2640DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
2641
2642static const struct file_operations *stat_fops[] = {
2643        [KVM_STAT_VCPU] = &vcpu_stat_fops,
2644        [KVM_STAT_VM]   = &vm_stat_fops,
2645};
2646
2647static void kvm_init_debug(void)
2648{
2649        struct kvm_stats_debugfs_item *p;
2650
2651        kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
2652        for (p = debugfs_entries; p->name; ++p)
2653                p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
2654                                                (void *)(long)p->offset,
2655                                                stat_fops[p->kind]);
2656}
2657
2658static void kvm_exit_debug(void)
2659{
2660        struct kvm_stats_debugfs_item *p;
2661
2662        for (p = debugfs_entries; p->name; ++p)
2663                debugfs_remove(p->dentry);
2664        debugfs_remove(kvm_debugfs_dir);
2665}
2666
2667static int kvm_suspend(struct sys_device *dev, pm_message_t state)
2668{
2669        hardware_disable(NULL);
2670        return 0;
2671}
2672
2673static int kvm_resume(struct sys_device *dev)
2674{
2675        hardware_enable(NULL);
2676        return 0;
2677}
2678
2679static struct sysdev_class kvm_sysdev_class = {
2680        .name = "kvm",
2681        .suspend = kvm_suspend,
2682        .resume = kvm_resume,
2683};
2684
2685static struct sys_device kvm_sysdev = {
2686        .id = 0,
2687        .cls = &kvm_sysdev_class,
2688};
2689
2690struct page *bad_page;
2691pfn_t bad_pfn;
2692
2693static inline
2694struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
2695{
2696        return container_of(pn, struct kvm_vcpu, preempt_notifier);
2697}
2698
2699static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
2700{
2701        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2702
2703        kvm_arch_vcpu_load(vcpu, cpu);
2704}
2705
2706static void kvm_sched_out(struct preempt_notifier *pn,
2707                          struct task_struct *next)
2708{
2709        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2710
2711        kvm_arch_vcpu_put(vcpu);
2712}
2713
2714int kvm_init(void *opaque, unsigned int vcpu_size,
2715                  struct module *module)
2716{
2717        int r;
2718        int cpu;
2719
2720        r = kvm_arch_init(opaque);
2721        if (r)
2722                goto out_fail;
2723
2724        bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2725
2726        if (bad_page == NULL) {
2727                r = -ENOMEM;
2728                goto out;
2729        }
2730
2731        bad_pfn = page_to_pfn(bad_page);
2732
2733        if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
2734                r = -ENOMEM;
2735                goto out_free_0;
2736        }
2737
2738        r = kvm_arch_hardware_setup();
2739        if (r < 0)
2740                goto out_free_0a;
2741
2742        for_each_online_cpu(cpu) {
2743                smp_call_function_single(cpu,
2744                                kvm_arch_check_processor_compat,
2745                                &r, 1);
2746                if (r < 0)
2747                        goto out_free_1;
2748        }
2749
2750        on_each_cpu(hardware_enable, NULL, 1);
2751        r = register_cpu_notifier(&kvm_cpu_notifier);
2752        if (r)
2753                goto out_free_2;
2754        register_reboot_notifier(&kvm_reboot_notifier);
2755
2756        r = sysdev_class_register(&kvm_sysdev_class);
2757        if (r)
2758                goto out_free_3;
2759
2760        r = sysdev_register(&kvm_sysdev);
2761        if (r)
2762                goto out_free_4;
2763
2764        /* A kmem cache lets us meet the alignment requirements of fx_save. */
2765        kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
2766                                           __alignof__(struct kvm_vcpu),
2767                                           0, NULL);
2768        if (!kvm_vcpu_cache) {
2769                r = -ENOMEM;
2770                goto out_free_5;
2771        }
2772
2773        kvm_chardev_ops.owner = module;
2774        kvm_vm_fops.owner = module;
2775        kvm_vcpu_fops.owner = module;
2776
2777        r = misc_register(&kvm_dev);
2778        if (r) {
2779                printk(KERN_ERR "kvm: misc device register failed\n");
2780                goto out_free;
2781        }
2782
2783        kvm_preempt_ops.sched_in = kvm_sched_in;
2784        kvm_preempt_ops.sched_out = kvm_sched_out;
2785
2786        kvm_init_debug();
2787
2788        return 0;
2789
2790out_free:
2791        kmem_cache_destroy(kvm_vcpu_cache);
2792out_free_5:
2793        sysdev_unregister(&kvm_sysdev);
2794out_free_4:
2795        sysdev_class_unregister(&kvm_sysdev_class);
2796out_free_3:
2797        unregister_reboot_notifier(&kvm_reboot_notifier);
2798        unregister_cpu_notifier(&kvm_cpu_notifier);
2799out_free_2:
2800        on_each_cpu(hardware_disable, NULL, 1);
2801out_free_1:
2802        kvm_arch_hardware_unsetup();
2803out_free_0a:
2804        free_cpumask_var(cpus_hardware_enabled);
2805out_free_0:
2806        __free_page(bad_page);
2807out:
2808        kvm_arch_exit();
2809out_fail:
2810        return r;
2811}
2812EXPORT_SYMBOL_GPL(kvm_init);
2813
2814void kvm_exit(void)
2815{
2816        tracepoint_synchronize_unregister();
2817        kvm_exit_debug();
2818        misc_deregister(&kvm_dev);
2819        kmem_cache_destroy(kvm_vcpu_cache);
2820        sysdev_unregister(&kvm_sysdev);
2821        sysdev_class_unregister(&kvm_sysdev_class);
2822        unregister_reboot_notifier(&kvm_reboot_notifier);
2823        unregister_cpu_notifier(&kvm_cpu_notifier);
2824        on_each_cpu(hardware_disable, NULL, 1);
2825        kvm_arch_hardware_unsetup();
2826        kvm_arch_exit();
2827        free_cpumask_var(cpus_hardware_enabled);
2828        __free_page(bad_page);
2829}
2830EXPORT_SYMBOL_GPL(kvm_exit);
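
/*
 * Editorial example, not part of kvm_main.c: how an architecture module
 * enters through kvm_init() and kvm_exit() above.  A condensed sketch of
 * the pattern used by the x86 backends, which pass their ops table, the
 * size of the arch vcpu structure and THIS_MODULE; the demo_* names are
 * placeholders.
 */
struct demo_vcpu {
	struct kvm_vcpu vcpu;		/* arch-specific state would follow */
};

static struct { int unused; } demo_arch_ops;	/* stands in for the arch ops table */

static int __init demo_arch_init(void)
{
	return kvm_init(&demo_arch_ops, sizeof(struct demo_vcpu), THIS_MODULE);
}

static void __exit demo_arch_exit(void)
{
	kvm_exit();
}

module_init(demo_arch_init);
module_exit(demo_arch_exit);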
2831