/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

static unsigned long long *facilities;

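/* Section: not file related */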
int kvm_arch_hardware_enable(void *garbage)
{
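	/* every s390 CPU is virtualization capable; nothing to enable */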
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

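/* Section: device related */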
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}
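/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */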
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm)
{
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	kvm->arch.gmap = gmap_alloc(current->mm);
	if (!kvm->arch.gmap)
		goto out_nogmap;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
	    (__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	gmap_free(kvm->arch.gmap);
}

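/* Section: vcpu related */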
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
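	/* nothing to do here */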
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
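	/* stash the host FPU and access registers, then load the guest's */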
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
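	/* save the guest's FPU and access registers, then restore the host's */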
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
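	/* this equals the initial cpu reset defined in the POP, but we
	 * don't switch to ESA mode */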
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
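	/* a fresh vcpu starts out stopped, in z/Architecture mode */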
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
				get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
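	/* kvm common code refers to this, but never calls it */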
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->arch.guest_acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
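	/* guest gprs 14 and 15 are shadowed in the SIE block (gg14/gg15) */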
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
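		/* intercept cannot be handled in-kernel, prepare kvm-run */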
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
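		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the intercept handler */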
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

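/*
 * Store status at a given guest address.
 * Two special address values are handled:
 * KVM_S390_STORE_STATUS_NOADDR:   store to absolute SAVE_AREA_BASE
 * KVM_S390_STORE_STATUS_PREFIXED: store to SAVE_AREA_BASE in prefix space
 */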
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	default:
		r = -EINVAL;
	}
	return r;
}

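/* Section: memory related */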
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
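	/* A few sanity checks. We can have only one memory slot, which has
	   to start at guest physical zero and which has to be located at a
	   segment (1MB) boundary in userland and which has to end at a
	   segment boundary as well. The memory in userland may be fragmented
	   into different vmas; it is fine to mmap() and munmap() in this
	   slot after this call at any time. */
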
	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int rc;

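	/* map the userspace memory region into the guest address space */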
	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

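	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */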
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
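	/* expose only those facility bits that KVM is known to handle */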
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);