linux/arch/s390/kvm/kvm-s390.c
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

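/*
 * Each entry maps a debugfs file name to a counter in struct
 * kvm_vcpu_stat; the files appear under /sys/kernel/debug/kvm/ once
 * the module is loaded.
 */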
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "instruction_stctl", VCPU_STAT(instruction_stctl) },
        { "instruction_stctg", VCPU_STAT(instruction_stctg) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_essa", VCPU_STAT(instruction_essa) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
        return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_ASYNC_PF:
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IRQFD:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_S390_IRQCHIP:
        case KVM_CAP_VM_ATTRIBUTES:
        case KVM_CAP_MP_STATE:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        default:
                r = 0;
        }
        return r;
}

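/*
 * Transfer the per-page dirty state from the gmap (guest address
 * space) into KVM's dirty bitmap, so that a following
 * KVM_GET_DIRTY_LOG sees every page the guest touched through SIE.
 */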
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot)
{
        gfn_t cur_gfn, last_gfn;
        unsigned long address;
        struct gmap *gmap = kvm->arch.gmap;

        down_read(&gmap->mm->mmap_sem);
        /* Loop over all guest pages */
        last_gfn = memslot->base_gfn + memslot->npages;
        for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
                address = gfn_to_hva_memslot(memslot, cur_gfn);

                if (gmap_test_and_clear_dirty(address, gmap))
                        mark_page_dirty(kvm, cur_gfn);
        }
        up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        int r;
        unsigned long n;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;

        mutex_lock(&kvm->slots_lock);

        r = -EINVAL;
        if (log->slot >= KVM_USER_MEM_SLOTS)
                goto out;

        memslot = id_to_memslot(kvm->memslots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        kvm_s390_sync_dirty_log(kvm, memslot);
        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* Clear the dirty log */
        if (is_dirty) {
                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_IRQCHIP:
                kvm->arch.use_irqchip = 1;
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

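/*
 * VM memory-control attributes: CMMA can only be enabled as long as no
 * vcpus have been created; clearing it resets the per-page guest
 * storage state (PGSTEs) across the whole address space.
 */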
static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;
        unsigned int idx;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_ENABLE_CMMA:
                ret = -EBUSY;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) == 0) {
                        kvm->arch.use_cmma = 1;
                        ret = 0;
                }
                mutex_unlock(&kvm->lock);
                break;
        case KVM_S390_VM_MEM_CLR_CMMA:
                mutex_lock(&kvm->lock);
                idx = srcu_read_lock(&kvm->srcu);
                page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
                srcu_read_unlock(&kvm->srcu, idx);
                mutex_unlock(&kvm->lock);
                ret = 0;
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                ret = kvm_s390_mem_control(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                switch (attr->attr) {
                case KVM_S390_VM_MEM_ENABLE_CMMA:
                case KVM_S390_VM_MEM_CLR_CMMA:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

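/*
 * Dispatcher for VM-level ioctls. Userspace reaches this via ioctl(2)
 * on the VM file descriptor; an illustrative (not from this file) call:
 *
 *      struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_IRQCHIP };
 *      ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */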
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        struct kvm_device_attr attr;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;

                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
        case KVM_CREATE_IRQCHIP: {
                struct kvm_irq_routing_entry routing;

                r = -EINVAL;
                if (kvm->arch.use_irqchip) {
                        /* Set up dummy routing. */
                        memset(&routing, 0, sizeof(routing));
                        kvm_set_irq_routing(kvm, &routing, 0, 0);
                        r = 0;
                }
                break;
        }
        case KVM_SET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_set_attr(kvm, &attr);
                break;
        }
        case KVM_GET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_get_attr(kvm, &attr);
                break;
        }
        case KVM_HAS_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_has_attr(kvm, &attr);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];
        static unsigned long sca_offset;

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;
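        /*
         * Stagger the SCA origin inside its page in 16-byte steps
         * (wrapping at 2K), presumably so the control blocks of
         * different VMs do not all start on the same cache lines.
         */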
        spin_lock(&kvm_lock);
        sca_offset = (sca_offset + 16) & 0x7f0;
        kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
        spin_unlock(&kvm_lock);

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);
        init_waitqueue_head(&kvm->arch.ipte_wq);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
                kvm->arch.gmap->private = kvm;
                kvm->arch.gmap->pfault_enabled = 0;
        }

        kvm->arch.css_support = 0;
        kvm->arch.use_irqchip = 0;

        spin_lock_init(&kvm->arch.start_stop_lock);

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        kvm_s390_clear_local_irqs(vcpu);
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        if (kvm_s390_cmma_enabled(vcpu->kvm))
                kvm_s390_vcpu_unsetup_cmma(vcpu);
        free_page((unsigned long)(vcpu->arch.sie_block));

        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
        kvm_s390_destroy_adapters(kvm);
        kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                vcpu->arch.gmap->private = vcpu->kvm;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

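/*
 * Lazy context switch: the host's FP and access registers are parked
 * in the vcpu while the guest runs and are restored again in
 * kvm_arch_vcpu_put().
 */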
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        save_fp_regs(vcpu->arch.host_fpregs.fprs);
        save_access_regs(vcpu->arch.host_acrs);
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        restore_fp_regs(vcpu->arch.host_fpregs.fprs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
                kvm_s390_vcpu_stop(vcpu);
        kvm_s390_clear_local_irqs(vcpu);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
        free_page(vcpu->arch.sie_block->cbrlo);
        vcpu->arch.sie_block->cbrlo = 0;
}

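/*
 * With CMMA enabled, SIE interprets ESSA and logs the affected blocks
 * into the CBRLO list; the page allocated here backs that list.
 */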
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
        vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
        if (!vcpu->arch.sie_block->cbrlo)
                return -ENOMEM;

        vcpu->arch.sie_block->ecb2 |= 0x80;     /* enable CMMA */
        vcpu->arch.sie_block->ecb2 &= ~0x08;    /* no PFMF interpretation */
        return 0;
}

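/*
 * First-time setup of the SIE control block: execution controls
 * (ecb/eca), interception controls (ictl) and the facility list the
 * guest is allowed to see.
 */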
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED |
                                                    CPUSTAT_GED);
        vcpu->arch.sie_block->ecb   = 6;
        if (test_vfacility(50) && test_vfacility(73))
                vcpu->arch.sie_block->ecb |= 0x10;

        vcpu->arch.sie_block->ecb2  = 8;
        vcpu->arch.sie_block->eca   = 0xD1002000U;
        if (sclp_has_siif())
                vcpu->arch.sie_block->eca |= 1;
        vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
        vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
                                      ICTL_TPROT;

        if (kvm_s390_cmma_enabled(vcpu->kvm)) {
                rc = kvm_s390_vcpu_setup_cmma(vcpu);
                if (rc)
                        return rc;
        }
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        struct sie_page *sie_page;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
        if (!sie_page)
                goto out_free_cpu;

        vcpu->arch.sie_block = &sie_page->sie_block;
        vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
        while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
                cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
        s390_vcpu_block(vcpu);
        exit_sie(vcpu);
}

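/*
 * Called via the ipte notifier when a host mapping of the guest prefix
 * area was changed: kick the affected vcpu out of SIE and let it
 * re-arm the notification via KVM_REQ_MMU_RELOAD before re-entry.
 */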
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
        int i;
        struct kvm *kvm = gmap->private;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                /* match against both prefix pages */
                if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
                        VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
                        kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
                        exit_sie_sync(vcpu);
                }
        }
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFTOKEN:
                r = put_user(vcpu->arch.pfault_token,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFCOMPARE:
                r = put_user(vcpu->arch.pfault_compare,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFSELECT:
                r = put_user(vcpu->arch.pfault_select,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PP:
                r = put_user(vcpu->arch.sie_block->pp,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_GBEA:
                r = put_user(vcpu->arch.sie_block->gbea,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFTOKEN:
                r = get_user(vcpu->arch.pfault_token,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFCOMPARE:
                r = get_user(vcpu->arch.pfault_compare,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFSELECT:
                r = get_user(vcpu->arch.pfault_select,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PP:
                r = get_user(vcpu->arch.sie_block->pp,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_GBEA:
                r = get_user(vcpu->arch.sie_block->gbea,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        if (test_fp_ctl(fpu->fpc))
                return -EINVAL;
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!is_vcpu_stopped(vcpu))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
                              KVM_GUESTDBG_USE_HW_BP | \
                              KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        int rc = 0;

        vcpu->guest_debug = 0;
        kvm_s390_clear_bp_data(vcpu);

        if (dbg->control & ~VALID_GUESTDBG_FLAGS)
                return -EINVAL;

        if (dbg->control & KVM_GUESTDBG_ENABLE) {
                vcpu->guest_debug = dbg->control;
                /* enforce guest PER */
                atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

                if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
                        rc = kvm_s390_import_bp_data(vcpu, dbg);
        } else {
                atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
                vcpu->arch.guestdbg.last_bp = 0;
        }

        if (rc) {
                vcpu->guest_debug = 0;
                kvm_s390_clear_bp_data(vcpu);
                atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
        }

        return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        /* CHECK_STOP and LOAD are not supported yet */
        return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
                                       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        int rc = 0;

        /* user space knows about this interface - let it control the state */
        vcpu->kvm->arch.user_cpu_state_ctrl = 1;

        switch (mp_state->mp_state) {
        case KVM_MP_STATE_STOPPED:
                kvm_s390_vcpu_stop(vcpu);
                break;
        case KVM_MP_STATE_OPERATING:
                kvm_s390_vcpu_start(vcpu);
                break;
        case KVM_MP_STATE_LOAD:
        case KVM_MP_STATE_CHECK_STOP:
                /* fall through - CHECK_STOP and LOAD are not supported yet */
        default:
                rc = -ENXIO;
        }

        return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
        if (!MACHINE_IS_LPAR)
                return false;
        /* only enable for z10 and later */
        if (!MACHINE_HAS_EDAT1)
                return false;
        if (!kvm->arch.use_cmma)
                return false;
        return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
        return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
        s390_vcpu_unblock(vcpu);
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
         * This ensures that the ipte instruction for this request has
         * already finished. We might race against a second unmapper that
         * wants to set the blocking bit. Let's just retry the request loop.
         */
        if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
                int rc;
                rc = gmap_ipte_notify(vcpu->arch.gmap,
                                      kvm_s390_get_prefix(vcpu),
                                      PAGE_SIZE * 2);
                if (rc)
                        return rc;
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
                if (!ibs_enabled(vcpu)) {
                        trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
                        atomic_set_mask(CPUSTAT_IBS,
                                        &vcpu->arch.sie_block->cpuflags);
                }
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
                if (ibs_enabled(vcpu)) {
                        trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
                        atomic_clear_mask(CPUSTAT_IBS,
                                          &vcpu->arch.sie_block->cpuflags);
                }
                goto retry;
        }

        /* nothing to do, just clear the request */
        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

        return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
        struct mm_struct *mm = current->mm;
        hva_t hva;
        long rc;

        hva = gmap_fault(gpa, vcpu->arch.gmap);
        if (IS_ERR_VALUE(hva))
                return (long)hva;
        down_read(&mm->mmap_sem);
        rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
        up_read(&mm->mmap_sem);

        return rc < 0 ? rc : 0;
}

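/*
 * Pseudo-page-fault protocol: the INIT token is delivered to the
 * faulting vcpu as a local interrupt, while the DONE token is injected
 * as a floating interrupt so that any cpu may observe the completion.
 */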
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
                                      unsigned long token)
{
        struct kvm_s390_interrupt inti;

        inti.parm64 = token;

        if (start_token) {
                inti.type = KVM_S390_INT_PFAULT_INIT;
                WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
        } else {
                inti.type = KVM_S390_INT_PFAULT_DONE;
                WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
        }
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work)
{
        /* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
        /*
         * s390 will always inject the page directly,
         * but we still want check_async_completion to cleanup
         */
        return true;
}

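/*
 * Hand the fault to the async-pf machinery only if the guest enabled
 * pfault (token set, PSW mask matching, CR0 subclass on) and can take
 * the notification interrupt right now; otherwise the caller falls
 * back to a synchronous fault-in.
 */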
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
        hva_t hva;
        struct kvm_arch_async_pf arch;
        int rc;

        if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                return 0;
        if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
            vcpu->arch.pfault_compare)
                return 0;
        if (psw_extint_disabled(vcpu))
                return 0;
        if (kvm_cpu_has_interrupt(vcpu))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
                return 0;
        if (!vcpu->arch.gmap->pfault_enabled)
                return 0;

        hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
        hva += current->thread.gmap_addr & ~PAGE_MASK;
        if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
                return 0;

        rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
        return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
        int rc, cpuflags;

        /*
         * On s390, notifications for arriving pages are delivered
         * directly to the guest, but the housekeeping for completed
         * pfaults is handled outside the worker.
         */
        kvm_check_async_pf_completion(vcpu);

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_cpu_flag(CIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        rc = kvm_s390_handle_requests(vcpu);
        if (rc)
                return rc;

        if (guestdbg_enabled(vcpu)) {
                kvm_s390_backup_guest_per_regs(vcpu);
                kvm_s390_patch_guest_per_regs(vcpu);
        }

        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
        trace_kvm_s390_sie_enter(vcpu, cpuflags);

        return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
        int rc = -1;

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (guestdbg_enabled(vcpu))
                kvm_s390_restore_guest_per_regs(vcpu);

        if (exit_reason >= 0) {
                rc = 0;
        } else if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
                vcpu->run->s390_ucontrol.trans_exc_code =
                                                current->thread.gmap_addr;
                vcpu->run->s390_ucontrol.pgm_code = 0x10;
                rc = -EREMOTE;

        } else if (current->thread.gmap_pfault) {
                trace_kvm_s390_major_guest_pfault(vcpu);
                current->thread.gmap_pfault = 0;
                if (kvm_arch_setup_async_pf(vcpu)) {
                        rc = 0;
                } else {
                        gpa_t gpa = current->thread.gmap_addr;
                        rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
                }
        }

        if (rc == -1) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                trace_kvm_s390_sie_fault(vcpu);
                rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

        if (rc == 0) {
                if (kvm_is_ucontrol(vcpu->kvm))
                        /* Don't exit for host interrupts. */
                        rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        }

        return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc, exit_reason;

        /*
         * We try to hold kvm->srcu during most of vcpu_run (except when
         * running the guest), so that memslots (and other stuff) are
         * protected
         */
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        do {
                rc = vcpu_pre_run(vcpu);
                if (rc)
                        break;

                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                /*
                 * PF_VCPU is evaluated in the fault handler, so no
                 * user access may happen between guest_enter and
                 * guest_exit.
                 */
                preempt_disable();
                kvm_guest_enter();
                preempt_enable();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
                kvm_guest_exit();
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

                rc = vcpu_post_run(vcpu, exit_reason);
        } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        return rc;
}

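/*
 * Top-level KVM_RUN handler: sync the register state handed over by
 * userspace into the SIE block, run the inner loop, and mirror PSW,
 * prefix and control registers back into kvm_run on exit.
 */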
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        if (guestdbg_exit_pending(vcpu)) {
                kvm_s390_prepare_debug_exit(vcpu);
                return 0;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
                kvm_s390_vcpu_start(vcpu);
        } else if (is_vcpu_stopped(vcpu)) {
                pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
                                   vcpu->vcpu_id);
                return -EINVAL;
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();
        rc = __vcpu_run(vcpu);

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (guestdbg_exit_pending(vcpu) && !rc) {
                kvm_s390_prepare_debug_exit(vcpu);
                rc = 0;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }
        if (rc == -EREMOTE) {
                /*
                 * The intercept was handled but needs userspace support;
                 * kvm_run has already been prepared by the handler.
                 */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
        unsigned char archmode = 1;
        unsigned int px;
        u64 clkcomp;
        int rc;

        if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
                if (write_guest_abs(vcpu, 163, &archmode, 1))
                        return -EFAULT;
                gpa = SAVE_AREA_BASE;
        } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
                if (write_guest_real(vcpu, 163, &archmode, 1))
                        return -EFAULT;
                gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
        }
        rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
                             vcpu->arch.guest_fpregs.fprs, 128);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
                              vcpu->run->s.regs.gprs, 128);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
                              &vcpu->arch.sie_block->gpsw, 16);
        px = kvm_s390_get_prefix(vcpu);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
                              &px, 4);
        rc |= write_guest_abs(vcpu,
                              gpa + offsetof(struct save_area, fp_ctrl_reg),
                              &vcpu->arch.guest_fpregs.fpc, 4);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
                              &vcpu->arch.sie_block->todpr, 4);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
                              &vcpu->arch.sie_block->cputm, 8);
        clkcomp = vcpu->arch.sie_block->ckc >> 8;
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
                              &clkcomp, 8);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
                              &vcpu->run->s.regs.acrs, 64);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
                              &vcpu->arch.sie_block->gcr, 128);
        return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the
         * lazy copying in vcpu load/put. Let's update our copies before
         * saving them into the save area.
         */
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);

        return kvm_s390_store_status_unloaded(vcpu, addr);
}

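/*
 * IBS only pays off while exactly one vcpu is running. The requests
 * queued below are processed by the target vcpu itself in
 * kvm_s390_handle_requests() after it was kicked out of SIE.
 */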
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
        kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
        kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
        exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                __disable_ibs_on_vcpu(vcpu);
        }
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
        kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
        kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
        exit_sie_sync(vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
        int i, online_vcpus, started_vcpus = 0;

        if (!is_vcpu_stopped(vcpu))
                return;

        trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
        /* Only one cpu at a time may enter/leave the STOPPED state. */
        spin_lock(&vcpu->kvm->arch.start_stop_lock);
        online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

        for (i = 0; i < online_vcpus; i++) {
                if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
                        started_vcpus++;
        }

        if (started_vcpus == 0) {
                /* we're the only active VCPU -> speed it up */
                __enable_ibs_on_vcpu(vcpu);
        } else if (started_vcpus == 1) {
                /*
                 * As we are starting a second VCPU, we have to disable
                 * the IBS facility on all VCPUs to remove potentially
                 * outstanding ENABLE requests.
                 */
                __disable_ibs_on_all_vcpus(vcpu->kvm);
        }

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
        /*
         * Another VCPU might have used IBS while we were offline.
         * Let's play safe and flush the VCPU at startup.
         */
        vcpu->arch.sie_block->ihcpu  = 0xffff;
        spin_unlock(&vcpu->kvm->arch.start_stop_lock);
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
        int i, online_vcpus, started_vcpus = 0;
        struct kvm_vcpu *started_vcpu = NULL;

        if (is_vcpu_stopped(vcpu))
                return;

        trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
        /* Only one cpu at a time may enter/leave the STOPPED state. */
        spin_lock(&vcpu->kvm->arch.start_stop_lock);
        online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

        /* Need to lock access to action_bits to avoid a SIGP race condition */
        spin_lock(&vcpu->arch.local_int.lock);
        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
1515        vcpu->arch.local_int.action_bits &=
1516                                 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
1517        spin_unlock(&vcpu->arch.local_int.lock);
1518
1519        __disable_ibs_on_vcpu(vcpu);
1520
1521        for (i = 0; i < online_vcpus; i++) {
1522                if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
1523                        started_vcpus++;
1524                        started_vcpu = vcpu->kvm->vcpus[i];
1525                }
1526        }
1527
1528        if (started_vcpus == 1) {
1529                /*
1530                 * As we only have one VCPU left, we want to enable the
1531                 * IBS facility for that VCPU to speed it up.
1532                 */
1533                __enable_ibs_on_vcpu(started_vcpu);
1534        }
1535
1536        spin_unlock(&vcpu->kvm->arch.start_stop_lock);
1537        return;
1538}
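
/*
 * Taken together, kvm_s390_vcpu_start() and kvm_s390_vcpu_stop() maintain
 * a simple invariant: IBS is enabled if and only if exactly one VCPU is
 * started.  Starting the first VCPU enables IBS on it, starting a second
 * one disables IBS everywhere, and stopping the next-to-last VCPU
 * re-enables IBS on the single remaining runner.
 */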
1539
1540static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1541                                     struct kvm_enable_cap *cap)
1542{
1543        int r;
1544
1545        if (cap->flags)
1546                return -EINVAL;
1547
1548        switch (cap->cap) {
1549        case KVM_CAP_S390_CSS_SUPPORT:
1550                if (!vcpu->kvm->arch.css_support) {
1551                        vcpu->kvm->arch.css_support = 1;
1552                        trace_kvm_s390_enable_css(vcpu->kvm);
1553                }
1554                r = 0;
1555                break;
1556        default:
1557                r = -EINVAL;
1558                break;
1559        }
1560        return r;
1561}
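
/*
 * From userspace this is reached via the KVM_ENABLE_CAP vcpu ioctl.  A
 * minimal sketch for the one capability accepted here (vcpu_fd is an
 * assumed open VCPU file descriptor; flags must be zero):
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_S390_CSS_SUPPORT,
 *	};
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */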
1562
1563long kvm_arch_vcpu_ioctl(struct file *filp,
1564                         unsigned int ioctl, unsigned long arg)
1565{
1566        struct kvm_vcpu *vcpu = filp->private_data;
1567        void __user *argp = (void __user *)arg;
1568        int idx;
1569        long r;
1570
1571        switch (ioctl) {
1572        case KVM_S390_INTERRUPT: {
1573                struct kvm_s390_interrupt s390int;
1574
1575                r = -EFAULT;
1576                if (copy_from_user(&s390int, argp, sizeof(s390int)))
1577                        break;
1578                r = kvm_s390_inject_vcpu(vcpu, &s390int);
1579                break;
1580        }
1581        case KVM_S390_STORE_STATUS:
1582                idx = srcu_read_lock(&vcpu->kvm->srcu);
1583                r = kvm_s390_vcpu_store_status(vcpu, arg);
1584                srcu_read_unlock(&vcpu->kvm->srcu, idx);
1585                break;
1586        case KVM_S390_SET_INITIAL_PSW: {
1587                psw_t psw;
1588
1589                r = -EFAULT;
1590                if (copy_from_user(&psw, argp, sizeof(psw)))
1591                        break;
1592                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
1593                break;
1594        }
1595        case KVM_S390_INITIAL_RESET:
1596                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
1597                break;
1598        case KVM_SET_ONE_REG:
1599        case KVM_GET_ONE_REG: {
1600                struct kvm_one_reg reg;
1601                r = -EFAULT;
1602                if (copy_from_user(&reg, argp, sizeof(reg)))
1603                        break;
1604                if (ioctl == KVM_SET_ONE_REG)
1605                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
1606                else
1607                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
1608                break;
1609        }
1610#ifdef CONFIG_KVM_S390_UCONTROL
1611        case KVM_S390_UCAS_MAP: {
1612                struct kvm_s390_ucas_mapping ucasmap;
1613
1614                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1615                        r = -EFAULT;
1616                        break;
1617                }
1618
1619                if (!kvm_is_ucontrol(vcpu->kvm)) {
1620                        r = -EINVAL;
1621                        break;
1622                }
1623
1624                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
1625                                     ucasmap.vcpu_addr, ucasmap.length);
1626                break;
1627        }
1628        case KVM_S390_UCAS_UNMAP: {
1629                struct kvm_s390_ucas_mapping ucasmap;
1630
1631                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1632                        r = -EFAULT;
1633                        break;
1634                }
1635
1636                if (!kvm_is_ucontrol(vcpu->kvm)) {
1637                        r = -EINVAL;
1638                        break;
1639                }
1640
1641                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
1642                        ucasmap.length);
1643                break;
1644        }
1645#endif
1646        case KVM_S390_VCPU_FAULT: {
1647                r = gmap_fault(arg, vcpu->arch.gmap);
1648                if (!IS_ERR_VALUE(r))
1649                        r = 0;
1650                break;
1651        }
1652        case KVM_ENABLE_CAP:
1653        {
1654                struct kvm_enable_cap cap;
1655                r = -EFAULT;
1656                if (copy_from_user(&cap, argp, sizeof(cap)))
1657                        break;
1658                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1659                break;
1660        }
1661        default:
1662                r = -ENOTTY;
1663        }
1664        return r;
1665}
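
/*
 * Two of the cases above, as exercised from userspace.  An illustrative
 * sketch only; the program-interruption code and the choice of
 * KVM_REG_S390_CPU_TIMER are examples, and <linux/kvm.h> plus
 * <sys/ioctl.h> are assumed to be included:
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_PROGRAM_INT,
 *		.parm = 1,	// program-interruption code 1: operation exception
 *	};
 *	ioctl(vcpu_fd, KVM_S390_INTERRUPT, &s390int);
 *
 *	__u64 cpu_timer;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)(unsigned long)&cpu_timer,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */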
1666
1667int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1668{
1669#ifdef CONFIG_KVM_S390_UCONTROL
1670        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
1671                 && (kvm_is_ucontrol(vcpu->kvm))) {
1672                vmf->page = virt_to_page(vcpu->arch.sie_block);
1673                get_page(vmf->page);
1674                return 0;
1675        }
1676#endif
1677        return VM_FAULT_SIGBUS;
1678}
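
/*
 * For user-controlled (ucontrol) VMs this fault handler lets userspace
 * mmap() the VCPU's hardware SIE control block.  A sketch, assuming a
 * 4KB page size and an open VCPU file descriptor:
 *
 *	void *sie_block = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, vcpu_fd,
 *			       KVM_S390_SIE_PAGE_OFFSET * 4096);
 */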
1679
1680void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1681                           struct kvm_memory_slot *dont)
1682{
1683}
1684
1685int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1686                            unsigned long npages)
1687{
1688        return 0;
1689}
1690
1691void kvm_arch_memslots_updated(struct kvm *kvm)
1692{
1693}
1694
1695/* Section: memory related */
1696int kvm_arch_prepare_memory_region(struct kvm *kvm,
1697                                   struct kvm_memory_slot *memslot,
1698                                   struct kvm_userspace_memory_region *mem,
1699                                   enum kvm_mr_change change)
1700{
1701        /* A few sanity checks. Memory slots have to start and end on a
1702           segment boundary (1MB). The memory in userland may be fragmented
1703           into various different vmas. It is fine to mmap() and munmap()
1704           within this slot at any time after this call. */
1705
1706        if (mem->userspace_addr & 0xffffful)
1707                return -EINVAL;
1708
1709        if (mem->memory_size & 0xffffful)
1710                return -EINVAL;
1711
1712        return 0;
1713}
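
/*
 * In other words, a slot registered via KVM_SET_USER_MEMORY_REGION must
 * have a 1MB-aligned userspace address and size.  An illustrative sketch
 * (backing is assumed to come from a 1MB-aligned mmap()):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 256 << 20,	// 256MB
 *		.userspace_addr  = (__u64)(unsigned long)backing,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */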
1714
1715void kvm_arch_commit_memory_region(struct kvm *kvm,
1716                                struct kvm_userspace_memory_region *mem,
1717                                const struct kvm_memory_slot *old,
1718                                enum kvm_mr_change change)
1719{
1720        int rc;
1721
1722        /* If the basics of the memslot do not change, we do not want
1723         * to update the gmap. Every update causes several unnecessary
1724         * segment translation exceptions. This is usually handled just
1725         * fine by the normal fault handler + gmap, but it will also
1726         * cause faults on the prefix page of running guest CPUs.
1727         */
1728        if (old->userspace_addr == mem->userspace_addr &&
1729            old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
1730            old->npages * PAGE_SIZE == mem->memory_size)
1731                return;
1732
1733        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1734                mem->guest_phys_addr, mem->memory_size);
1735        if (rc)
1736                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
1737        return;
1738}
1739
1740void kvm_arch_flush_shadow_all(struct kvm *kvm)
1741{
1742}
1743
1744void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1745                                   struct kvm_memory_slot *slot)
1746{
1747}
1748
1749static int __init kvm_s390_init(void)
1750{
1751        int ret;
1752        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1753        if (ret)
1754                return ret;
1755
1756        /*
1757         * Guests can ask for up to 255+1 doublewords, so we need a full
1758         * page to hold the maximum number of facility bits. On the other
1759         * hand, we only expose facilities that are known to work in KVM.
1760         */
1761        vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
1762        if (!vfacilities) {
1763                kvm_exit();
1764                return -ENOMEM;
1765        }
1766        memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
1767        vfacilities[0] &= 0xff82fff3f4fc2000UL;
1768        vfacilities[1] &= 0x005c000000000000UL;
1769        return 0;
1770}
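
/*
 * The memcpy() above copies only the first two doublewords (facility
 * bits 0-127), and the masks then whitelist the bits KVM is willing to
 * report.  STFLE numbers facilities from the most significant bit of
 * doubleword 0, so facility nr lives in vfacilities[nr / 64] at bit
 * position 63 - (nr % 64).  A test could look like this sketch (not a
 * helper defined in this file):
 *
 *	static inline int vfacility_test(unsigned int nr)
 *	{
 *		return (vfacilities[nr >> 6] >> (63 - (nr & 63))) & 1;
 *	}
 */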
1771
1772static void __exit kvm_s390_exit(void)
1773{
1774        free_page((unsigned long) vfacilities);
1775        kvm_exit();
1776}
1777
1778module_init(kvm_s390_init);
1779module_exit(kvm_s390_exit);
1780
1781/*
1782 * Enable autoloading of the kvm module.
1783 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
1784 * since x86 takes a different approach.
1785 */
1786#include <linux/miscdevice.h>
1787MODULE_ALIAS_MISCDEV(KVM_MINOR);
1788MODULE_ALIAS("devname:kvm");
1789