linux/arch/s390/kvm/kvm-s390.c
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
        { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
        return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_ENABLE_CAP_VM:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

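        /*
         * The SCA (system control area) holds the per-vcpu SIE block
         * designations and is shared by all vcpus of this VM; a single
         * zeroed page holds it.
         */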
        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
                kvm->arch.gmap->private = kvm;
        }

        kvm->arch.css_support = 0;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
        return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                vcpu->arch.gmap->private = vcpu->kvm;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
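        /*
         * Advertise the register sets that are mirrored in the kvm_run
         * synced register area, so userspace can use them instead of
         * the GET/SET_(S)REGS ioctls.
         */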
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
}

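/*
 * Lazy register switching: vcpu_load saves the host floating point and
 * access registers and loads the guest copies; kvm_arch_vcpu_put below
 * does the reverse. While the vcpu is loaded, the guest values live in
 * the real registers, which is why store_status refreshes its copies
 * before writing them to the save area.
 */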
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        save_fp_regs(vcpu->arch.host_fpregs.fprs);
        save_access_regs(vcpu->arch.host_acrs);
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        restore_fp_regs(vcpu->arch.host_fpregs.fprs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in the PoP, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
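        /* architected reset values for CR0 and CR14; the bit meanings
         * (external-interruption and machine-check subclass masks) are
         * described in the Principles of Operation */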
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

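/*
 * Initial SIE control block state: the cpuflags select z/Architecture
 * mode with the cpu stopped; the ecb/eca masks enable individual SIE
 * interpretation facilities (the meaning of the magic values is defined
 * alongside struct kvm_s390_sie_block).
 */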
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED |
                                                    CPUSTAT_GED);
        vcpu->arch.sie_block->ecb   = 6;
        vcpu->arch.sie_block->ecb2  = 8;
        vcpu->arch.sie_block->eca   = 0xC1002000U;
        if (sclp_has_siif())
                vcpu->arch.sie_block->eca |= 1;
        vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

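/*
 * While PROG_BLOCK_SIE is set in prog20, the vcpu cannot (re-)enter SIE.
 * exit_sie_sync() below relies on this to kick a cpu out of guest
 * context and keep it out until the bit is cleared again.
 */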
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
        while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
                cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
        s390_vcpu_block(vcpu);
        exit_sie(vcpu);
}

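/*
 * Called via the gmap ipte notifier when a notifier-armed guest page is
 * invalidated. If it is a vcpu's prefix page, force that vcpu out of SIE;
 * kvm_s390_handle_requests() re-arms the notifier before the vcpu
 * re-enters the guest.
 */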
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
        int i;
        struct kvm *kvm = gmap->private;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                /* match against both prefix pages */
                if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
                        VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
                        kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
                        exit_sie_sync(vcpu);
                }
        }
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        if (test_fp_ctl(fpu->fpc))
                return -EINVAL;
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
         * This ensures that the ipte instruction for this request has
         * already finished. We might race against a second unmapper that
         * wants to set the blocking bit. Let's just retry the request loop.
         */
        while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
                int rc;
                rc = gmap_ipte_notify(vcpu->arch.gmap,
                                      vcpu->arch.sie_block->prefix,
                                      PAGE_SIZE * 2);
                if (rc)
                        return rc;
                s390_vcpu_unblock(vcpu);
        }
        return 0;
}

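/*
 * One round of the inner run loop: deliver pending work, then enter SIE
 * once. Guest registers 14 and 15 are shadowed in the SIE block (gg14),
 * presumably because the sie64a() entry code needs r14/r15 for host
 * linkage; they are copied in before and back out after the run.
 */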
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc;

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        rc = kvm_s390_handle_requests(vcpu);
        if (rc)
                return rc;

        vcpu->arch.sie_block->icptcode = 0;
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        trace_kvm_s390_sie_enter(vcpu,
                                 atomic_read(&vcpu->arch.sie_block->cpuflags));

        /*
         * As PF_VCPU will be used in the fault handler, no uaccess is
         * allowed between guest_enter and guest_exit.
         */
        preempt_disable();
        kvm_guest_enter();
        preempt_enable();
        rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
        kvm_guest_exit();

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (rc > 0)
                rc = 0;
        if (rc < 0) {
                if (kvm_is_ucontrol(vcpu->kvm)) {
                        rc = SIE_INTERCEPT_UCONTROL;
                } else {
                        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                        trace_kvm_s390_sie_fault(vcpu);
                        rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }
        }

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
        return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

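        /* userspace must hand back one of the exit reasons that this
         * code sets up; anything else indicates a corrupted kvm_run */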
        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
        case KVM_EXIT_S390_TSCH:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();

        do {
                rc = __vcpu_run(vcpu);
                if (rc)
                        break;
                if (kvm_is_ucontrol(vcpu->kvm))
                        rc = -EOPNOTSUPP;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

#ifdef CONFIG_KVM_S390_UCONTROL
        if (rc == SIE_INTERCEPT_UCONTROL) {
                kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
                kvm_run->s390_ucontrol.trans_exc_code =
                        current->thread.gmap_addr;
                kvm_run->s390_ucontrol.pgm_code = 0x10;
                rc = 0;
        }
#endif

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /*
                 * intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler
                 */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

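/*
 * Copy to guest memory either via the logical address space, which
 * honors the guest's prefix page redirection, or via guest absolute
 * addressing, depending on how the caller computed guestdest.
 */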
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Let's update our copies before we store
         * them into the save area.
         */
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->run->s.regs.gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->run->s.regs.acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_CSS_SUPPORT:
                if (!vcpu->kvm->arch.css_support) {
                        vcpu->kvm->arch.css_support = 1;
                        trace_kvm_s390_enable_css(vcpu->kvm);
                }
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                        ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}

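/*
 * mmap() handler for the vcpu fd: user controlled VMs may map the
 * hardware SIE control block at KVM_S390_SIE_PAGE_OFFSET; no other
 * vcpu state is mmap'able, so everything else gets SIGBUS.
 */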
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   const struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        /* A few sanity checks. Memory slots have to start and end on a
           segment boundary (1MB). The memory in userland may be fragmented
           into various different vmas. It is okay to mmap() and munmap()
           stuff in this slot at any time after this call. */

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old,
                                const struct kvm_memory_slot *new,
                                enum kvm_mr_change change)
{
        int rc;

        /* If the basics of the memslot do not change, we do not want
         * to update the gmap. Every update causes several unnecessary
         * segment translation exceptions. This is usually handled just
         * fine by the normal fault handler + gmap, but it will also
         * cause faults on the prefix page of running guest CPUs.
         */
        if (old->userspace_addr == mem->userspace_addr &&
            old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
            old->npages * PAGE_SIZE == mem->memory_size)
                return;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
        int ret;

        if (!sclp_has_sief2()) {
                pr_info("SIE not available\n");
                return -ENODEV;
        }

        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!vfacilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
        vfacilities[0] &= 0xff82fff3f47c0000UL;
        vfacilities[1] &= 0x001c000000000000UL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) vfacilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);