linux/arch/s390/kvm/kvm-s390.c
/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm)
{
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;
	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

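	/* per-VM s390 debug feature log; a sprintf view is attached below */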
	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return 0;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
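	/* the SCA cpu mask (mcn) is numbered from the MSB, hence bit 63 - id */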
	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		(__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

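/*
 * Swap host and guest floating point and access registers: guest state
 * is loaded on vcpu_load and saved back on vcpu_put.
 */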
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix    = 0UL;
	vcpu->arch.sie_block->ihcpu     = 0xffff;
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
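	/* architected reset values for CR0 and CR14 (initial CPU reset, PoP) */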
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
	vcpu->arch.sie_block->ecb   = 6;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
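	/* fac is a 31-bit address; the facility page is GFP_DMA, i.e. below 2GB */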
	vcpu->arch.sie_block->fac   = (int) (long) facilities;
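	/* the clock comparator follows the TOD (wall) clock, hence CLOCK_REALTIME */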
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
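	/* scaoh/scaol carry the high and low words of the 64-bit SCA origin */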
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

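	/* the new PSW is staged in kvm_run; the next KVM_RUN moves it into the SIE block */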
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
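	/* gg14/gg15 in the SIE block shadow guest r14/r15; 16 bytes covers both */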
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
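	/* irqs are disabled around kvm_guest_enter() for guest time accounting */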
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_s390_vcpu_set_mem(vcpu);

	/* verify that memory has been registered */
	if (!vcpu->arch.sie_block->gmslm) {
		vcpu_put(vcpu);
		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
		return -EINVAL;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
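/*
 * For reference, a minimal sketch of the matching userspace loop
 * (vcpu_fd, run_size and handle_sieic are hypothetical; no error
 * handling): mmap the shared kvm_run area, call KVM_RUN, and dispatch
 * on exit_reason.
 *
 *	struct kvm_run *run = mmap(NULL, run_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sieic(run->s390_sieic.icptcode,
 *				     run->s390_sieic.ipa,
 *				     run->s390_sieic.ipb);
 *	}
 */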
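/*
 * Copy to guest real storage (honouring the prefix) or to absolute
 * storage, depending on how the store-status address was specified.
 */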
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

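	/* location 163 holds the architected-mode id; 1 says z/Architecture */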
	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	default:
		r = -EINVAL;
	}
	return r;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot, which has
	   to start at guest physical zero, has to be located at a page
	   boundary in userland, and has to end at a page boundary. The memory
	   in userland may be fragmented into multiple different vmas. It is
	   okay to mmap() and munmap() in this slot at any time after this
	   call; see the example after this function. */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
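/*
 * The single slot userspace is expected to register looks like this
 * (a sketch; vm_fd, backing and mem_size are hypothetical, the last
 * two page aligned):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.userspace_addr = (unsigned long) backing,
 *		.memory_size = mem_size,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */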

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int i;
	struct kvm_vcpu *vcpu;

	/* request update of sie control block for all available vcpus */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
	}
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, so we need a full page
	 * to hold the maximum number of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
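	/* propagate only the first two doublewords of the host facility list */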
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);