#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

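/*
 * Pointer to the (zeroed) facility list page set up in kvm_s390_init();
 * each vcpu's SIE control block points at it via sie_block->fac.
 */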
static unsigned long long *facilities;

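/*
 * Hardware setup stubs: every s390 CPU can execute SIE, so there is
 * nothing to enable, disable, or check per processor.
 */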
void kvm_arch_hardware_enable(void *garbage)
{
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

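/*
 * Device ioctls: the only one handled here flips the host into
 * SIE-capable mode via s390_enable_sie().
 */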
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

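/* No optional capability extensions are reported for s390 at this point. */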
int kvm_dev_ioctl_check_extension(long ext)
{
	switch (ext) {
	default:
		return 0;
	}
}

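/* Section: vm related */

/*
 * Get (and clear) the dirty memory log for a memory slot:
 * dirty page tracking is not implemented on s390, so this is a no-op.
 */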
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -EINVAL;
	}

	return r;
}

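/*
 * Create a VM: requires SIE support, allocates the system control area
 * (SCA) shared with the hardware, and registers an s390 debug feature
 * for tracing VM events.
 */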
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}

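/*
 * Tear down a vcpu: clear its SCA entry (if it still points at this
 * vcpu's SIE block) before freeing the SIE block itself.
 */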
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
	    (__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm);
}

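/* Section: vcpu related */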
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

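/*
 * Context switch of FPU and access registers: stash the host copies
 * and load the guest copies on load; do the reverse on put.
 */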
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

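/*
 * This matches the initial CPU reset defined by the Principles of
 * Operation, except that the vcpu stays in z/Architecture mode
 * instead of switching back to ESA.
 */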
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

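/*
 * One-time SIE control block setup: run the guest in z/Architecture
 * mode, point the block at the shared facility list, and set up the
 * clock-comparator timer used to wake an idle vcpu.
 */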
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

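/*
 * Allocate a vcpu with its SIE control block, enter the block into the
 * system control area, and hook the vcpu's local interrupt structure
 * into the VM-wide floating interrupt list.
 */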
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
		get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_cpu;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

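/*
 * Referenced by common KVM code; it should never be reached on s390,
 * hence the BUG().
 */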
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else
		vcpu->arch.sie_block->gpsw = psw;
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

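/*
 * One host->guest->host round trip: deliver pending interrupts, enter
 * SIE, and account guest time.  Guest gprs 14 and 15 are shadowed in
 * the SIE block (gg14/gg15) while the guest runs, hence the 16-byte
 * copies at entry and exit.
 */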
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

482
483int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
484{
485 int rc;
486 sigset_t sigsaved;
487
488 vcpu_load(vcpu);
489
490rerun_vcpu:
491 if (vcpu->requests)
492 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
493 kvm_s390_vcpu_set_mem(vcpu);
494
495
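	/* verify that memory has been registered before running the vcpu */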
	if (!vcpu->arch.sie_block->gmslm) {
		vcpu_put(vcpu);
		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
		return -EINVAL;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
		vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
		vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
		break;
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -ENOTSUPP) {
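		/* intercept cannot be handled in-kernel: prepare kvm_run
		 * so userspace can process the SIE intercept */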
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
		kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
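		/* intercept was handled, but userspace support is needed;
		 * kvm_run has already been prepared by the handler */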
		rc = 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

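/*
 * Store status at address.  Two special cases are handled:
 * KVM_S390_STORE_STATUS_NOADDR stores to absolute SAVE_AREA_BASE,
 * KVM_S390_STORE_STATUS_PREFIXED stores relative to the vcpu prefix.
 */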
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		break;
	}
	return -EINVAL;
}

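/* Section: memory related */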
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	int i;
	struct kvm_vcpu *vcpu;

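	/* A few sanity checks: there is exactly one memory slot, it must
	 * start at guest physical zero, be page aligned in userland, end
	 * on a page boundary, and come from user-allocated memory.  The
	 * userland mapping itself may be fragmented across several vmas. */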
	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

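	/* request an update of the SIE control block for all vcpus */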
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
	}

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
	if (ret)
		return ret;

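	/*
	 * Guests can ask for up to 255+1 double words of facility bits,
	 * so a full page is needed to hold the maximum amount.  Only
	 * facilities known to work under KVM are reported back.
	 */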
	facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	stfle(facilities, 1);
	facilities[0] &= 0xff00fff3f0700000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);