// SPDX-License-Identifier: GPL-2.0
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/page-states.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/sclp.h>
#include <asm/ap.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

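/*
 * Lazily enable the runtime-instrumentation facility: flag RI in the
 * SIE control block and retry the intercepted instruction, so that SIE
 * interprets it from now on without further intercepts.
 */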
static int handle_ri(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ri++;

	if (test_kvm_facility(vcpu->kvm, 64)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

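/* The 0xaa opcodes with an IPA last-nibble <= 4 all belong to RI. */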
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}

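/*
 * Lazily enable guarded storage: load the guest's GS control block on
 * the host CPU, flag GS handling in the SIE control block and retry
 * the intercepted instruction.
 */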
static int handle_gs(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_gs++;

	if (test_kvm_facility(vcpu->kvm, 133)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
		preempt_disable();
		__ctl_set_bit(2, 4);
		current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
		restore_gs_cb(current->thread.gs_cb);
		preempt_enable();
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
	int code = vcpu->arch.sie_block->ipb & 0xff;

	if (code == 0x49 || code == 0x4d)
		return handle_gs(vcpu);
	else
		return -EOPNOTSUPP;
}

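/*
 * SET CLOCK: read the new TOD value from the second operand and set
 * the guest TOD clock accordingly, then report cc 0 (clock set).
 */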
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };
	int rc;
	u8 ar;
	u64 op2;

	vcpu->stat.instruction_sck++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
	kvm_s390_set_tod_clock(vcpu->kvm, &gtod);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

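/*
 * SET PREFIX: read the new prefix value from guest memory, check that
 * the designated 8k prefix area is backed by memory and switch the
 * vcpu to the new prefix.
 */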
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are
	 * always at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

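/*
 * STORE PREFIX: write the current prefix register to the word-aligned
 * second-operand location.
 */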
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

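/*
 * Enable storage-key handling for the guest on first use: convert the
 * host pages to be usable with storage keys, then either intercept the
 * key instructions or let the storage-key facility of SIE interpret
 * them, depending on the use_skf setting.
 */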
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc;

	trace_kvm_s390_skey_related_inst(vcpu);

	if (vcpu->arch.skey_enabled)
		return 0;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (rc)
		return rc;

	if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
	if (!vcpu->kvm->arch.use_skf)
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	else
		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	vcpu->arch.skey_enabled = true;
	return 0;
}

static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	rc = kvm_s390_skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (vcpu->kvm->arch.use_skf) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	return 0;
}

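/*
 * INSERT STORAGE KEY EXTENDED: read the storage key of the addressed
 * page from the host page table and return it in the low byte of r1.
 * Page faults are resolved via fixup_user_fault() and the lookup is
 * retried.
 */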
static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long gaddr, vmaddr;
	unsigned char key;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_iske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
	gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
	unlocked = false;
	mmap_read_lock(current->mm);
	rc = get_guest_storage_key(current->mm, vmaddr, &key);

	if (rc) {
		rc = fixup_user_fault(current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
		if (!rc) {
			mmap_read_unlock(current->mm);
			goto retry;
		}
	}
	mmap_read_unlock(current->mm);
	if (rc == -EFAULT)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (rc < 0)
		return rc;
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long vmaddr, gaddr;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_rrbe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
	gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
	unlocked = false;
	mmap_read_lock(current->mm);
	rc = reset_guest_reference_bit(current->mm, vmaddr);
	if (rc < 0) {
		rc = fixup_user_fault(current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
		if (!rc) {
			mmap_read_unlock(current->mm);
			goto retry;
		}
	}
	mmap_read_unlock(current->mm);
	if (rc == -EFAULT)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (rc < 0)
		return rc;
	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
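/*
 * SET STORAGE KEY EXTENDED: set the storage key for one page, or for
 * multiple pages up to the next segment boundary when the
 * multiple-block control (MB) is set. The MR/MC/NQ controls are
 * honoured only if the corresponding facilities are available to the
 * guest.
 */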
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_sske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		unlocked = false;

		if (kvm_is_error_hva(vmaddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		mmap_read_lock(current->mm);
		rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);

		if (rc < 0) {
			rc = fixup_user_fault(current->mm, vmaddr,
					      FAULT_FLAG_WRITE, &unlocked);
			rc = !rc ? -EAGAIN : rc;
		}
		mmap_read_unlock(current->mm);
		if (rc == -EFAULT)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		if (rc == -EAGAIN)
			continue;
		if (rc < 0)
			return rc;
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

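/*
 * TEST BLOCK: verify that the addressed page exists, zero it out and
 * indicate success with cc 0 and r0 == 0. Low-address protection is
 * checked before the page is touched.
 */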
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	vcpu->stat.instruction_tb++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

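/*
 * TEST PENDING INTERRUPTION: dequeue a pending I/O interrupt matching
 * cr6 and store its interruption code either at the provided address
 * or in the lowcore. If storing fails, the interrupt is reinjected.
 */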
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	vcpu->stat.instruction_tpi++;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we failed to handle the interrupt we have to try to reinject it
	 * back to the guest. In case we could not reinject it, we have to
	 * drop it and from now on there is no interrupt pending anymore,
	 * which is ok, as the guest will retry in these cases.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	vcpu->stat.instruction_tsch++;

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		vcpu->stat.instruction_io_other++;
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

/*
 * handle_pqap: Handling pqap interception
 * @vcpu: the vcpu having issue the pqap instruction
 *
 * We now support PQAP/AQIC instructions and we need to correctly
 * answer the guest even if no dedicated driver's hook is available.
 *
 * The intercepting code calls a dedicated callback for this instruction
 * if a driver did register one in the CRYPTO satellite of the
 * SIE block.
 *
 * If no callback is available, the queues are not available, return this
 * response code to the caller and set CC to 3.
 * Else return the response code returned by the callback.
 */
static int handle_pqap(struct kvm_vcpu *vcpu)
{
	struct ap_queue_status status = {};
	crypto_hook pqap_hook;
	unsigned long reg0;
	int ret;
	uint8_t fc;

	/* Verify that the AP instructions are available */
	if (!ap_instructions_available())
		return -EOPNOTSUPP;
	/* Verify that the guest is allowed to use AP instructions */
	if (!(vcpu->arch.sie_block->eca & ECA_APIE))
		return -EOPNOTSUPP;
	/*
	 * The only possibly intercepted functions when AP instructions are
	 * available for the guest are AQIC and TAPQ with the t bit set
	 * since we do not set IC.3 (FIII) we currently will only intercept
	 * the AQIC function code.
	 * Note: running nested under z/VM can result in intercepts for other
	 * function codes, e.g. PQAP(QCI). We do not support this and bail out.
	 */
	reg0 = vcpu->run->s.regs.gprs[0];
	fc = (reg0 >> 24) & 0xff;
	if (fc != 0x03)
		return -EOPNOTSUPP;

	/* PQAP instruction is allowed for guest kernel only */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* Common PQAP instruction specification exceptions */
	/* bits 41-47 must all be zeros */
	if (reg0 & 0x007f0000UL)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* APFT not installed and T bit set */
	if (!test_kvm_facility(vcpu->kvm, 15) && (reg0 & 0x00800000UL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* APXA not installed and APID greater 64 or APQI greater 16 */
	if (!(vcpu->kvm->arch.crypto.crycbd & 0x02) && (reg0 & 0x0000c0f0UL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* AQIC function code specific exception */
	/* facility 65 must be present for the AQIC function code */
	if (!test_kvm_facility(vcpu->kvm, 65))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/*
	 * If the hook callback is registered, there will be a pointer to the
	 * hook function pointer in the kvm_s390_crypto structure. Lock the
	 * owner, retrieve the hook function pointer and call the hook.
	 */
	down_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
	if (vcpu->kvm->arch.crypto.pqap_hook) {
		pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook;
		ret = pqap_hook(vcpu);
		if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000)
			kvm_s390_set_psw_cc(vcpu, 3);
		up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
		return ret;
	}
	up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
	/*
	 * A vfio_driver must register a hook.
	 * No hook means no driver to enable the SIE CRYPTO and no queues.
	 * We send this response to the guest.
	 */
	status.response_code = 0x01;
	memcpy(&vcpu->run->s.regs.gprs[1], &status, sizeof(status));
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

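/*
 * Check a PSW for architectural validity: no unassigned mask bits may
 * be set and the instruction address must fit the addressing mode
 * selected by the EA/BA bits.
 */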
int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpsw++;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpswe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

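/*
 * STORE SYSTEM INFORMATION: fc 1 and 2 are forwarded to the machine's
 * stsi instruction, fc 3 (VM guest info) is synthesized, and fc 0 just
 * reports the highest supported fc. The result page is either copied
 * into the protected guest's SIDA or written to guest memory.
 */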
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (!kvm_s390_pv_cpu_is_protected(vcpu) && (operand2 & 0xfff))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL_ACCOUNT);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL_ACCOUNT);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		memcpy((void *)sida_origin(vcpu->arch.sie_block), (void *)mem,
		       PAGE_SIZE);
		rc = 0;
	} else {
		rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	}
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x02:
		return handle_stidp(vcpu);
	case 0x04:
		return handle_set_clock(vcpu);
	case 0x10:
		return handle_set_prefix(vcpu);
	case 0x11:
		return handle_store_prefix(vcpu);
	case 0x12:
		return handle_store_cpu_address(vcpu);
	case 0x14:
		return kvm_s390_handle_vsie(vcpu);
	case 0x21:
	case 0x50:
		return handle_ipte_interlock(vcpu);
	case 0x29:
		return handle_iske(vcpu);
	case 0x2a:
		return handle_rrbe(vcpu);
	case 0x2b:
		return handle_sske(vcpu);
	case 0x2c:
		return handle_test_block(vcpu);
	case 0x30:
	case 0x31:
	case 0x32:
	case 0x33:
	case 0x34:
	case 0x35:
	case 0x36:
	case 0x37:
	case 0x38:
	case 0x39:
	case 0x3a:
	case 0x3b:
	case 0x3c:
	case 0x5f:
	case 0x74:
	case 0x76:
		return handle_io_inst(vcpu);
	case 0x56:
		return handle_sthyi(vcpu);
	case 0x7d:
		return handle_stsi(vcpu);
	case 0xaf:
		return handle_pqap(vcpu);
	case 0xb1:
		return handle_stfl(vcpu);
	case 0xb2:
		return handle_lpswe(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	vcpu->stat.instruction_epsw++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED 0xfffc0101UL
#define PFMF_SK 0x00020000UL
#define PFMF_CF 0x00010000UL
#define PFMF_UI 0x00008000UL
#define PFMF_FSC 0x00007000UL
#define PFMF_NQ 0x00000800UL
#define PFMF_MR 0x00000400UL
#define PFMF_MC 0x00000200UL
#define PFMF_KEY 0x000000feUL

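/*
 * PERFORM FRAME MANAGEMENT FUNCTION: clear frames and/or set their
 * storage keys, for 4K, 1M or 2G frames as selected by the frame-size
 * code, page by page from the host's point of view.
 */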
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
		break;
	case 0x00001000:
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long vmaddr;
		bool unlocked = false;

		/* Translate guest address to host address */
		vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(vmaddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = kvm_s390_skey_check_enable(vcpu);

			if (rc)
				return rc;
			mmap_read_lock(current->mm);
			rc = cond_set_guest_storage_key(current->mm, vmaddr,
							key, NULL, nq, mr, mc);
			if (rc < 0) {
				rc = fixup_user_fault(current->mm, vmaddr,
						      FAULT_FLAG_WRITE, &unlocked);
				rc = !rc ? -EAGAIN : rc;
			}
			mmap_read_unlock(current->mm);
			if (rc == -EFAULT)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			if (rc == -EAGAIN)
				continue;
			if (rc < 0)
				return rc;
		}
		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}

/*
 * Must be called with relevant read locks held (kvm->mm->mmap_lock, kvm->srcu)
 */
static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
{
	int r1, r2, nappended, entries;
	unsigned long gfn, hva, res, pgstev, ptev;
	unsigned long *cbrlo;

	/*
	 * We don't need to set SD.FPF.SK to 1 here, because if we have a
	 * machine check here we either handle it or crash
	 */

	kvm_s390_get_regs_rre(vcpu, &r1, &r2);
	gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
	hva = gfn_to_hva(vcpu->kvm, gfn);
	entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;

	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
	if (nappended < 0) {
		res = orc ? 0x10 : 0;	/* Exception Indication */
		vcpu->run->s.regs.gprs[r1] = res;
		return 0;
	}
	res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22;
	/*
	 * Set the block-content state part of the result. 0 means resident, so
	 * nothing to do if the page is valid. 2 is for preserved pages
	 * (non-present and non-zero), and 3 for zero pages (non-present and
	 * zero).
	 */
	if (ptev & _PAGE_INVALID) {
		res |= 2;
		if (pgstev & _PGSTE_GPS_ZERO)
			res |= 1;
	}
	if (pgstev & _PGSTE_GPS_NODAT)
		res |= 0x20;
	vcpu->run->s.regs.gprs[r1] = res;
	/*
	 * It is possible that all the normal 511 slots were full, in which case
	 * we will now write in the 512th slot, which is reserved for host use.
	 * In both cases we let the normal essa handling code process all the
	 * slots, including the reserved one, if needed.
	 */
	if (nappended > 0) {
		cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
		cbrlo[entries] = gfn << PAGE_SHIFT;
	}

	if (orc) {
		struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);

		/* Increment only if we are really flipping the bit */
		if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages);
	}

	return nappended;
}

static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i, orc;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* Check for invalid operation request code */
	orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	/* ORCs 0-6 are all valid */
	if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
						     : ESSA_SET_STABLE_IF_RESIDENT))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (!vcpu->kvm->arch.migration_mode) {
		/*
		 * CMMA is enabled in the KVM settings, but is disabled in
		 * the SIE block and in the mm context. The ESSA intercept
		 * means the guest started to use CMM: flag the mm as using
		 * CMM, enable CMMA interpretation in the SIE block and let
		 * SIE handle ESSA from now on, starting with a retry of the
		 * intercepted instruction. No migration is in progress, so
		 * nothing needs to be tracked.
		 */
		if (vcpu->kvm->mm->context.uses_cmm == 0) {
			mmap_write_lock(vcpu->kvm->mm);
			vcpu->kvm->mm->context.uses_cmm = 1;
			mmap_write_unlock(vcpu->kvm->mm);
		}
		/*
		 * If we are here, we are supposed to have CMMA enabled in
		 * the SIE block. Enabling CMMA works on a per-CPU basis,
		 * while the context use_cmma flag is per process.
		 * It's possible that the context flag is enabled and the
		 * SIE flag is not, so we set the flag always; if it was
		 * already set, nothing changes.
		 */
		vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		/* Retry the ESSA instruction */
		kvm_s390_retry_instr(vcpu);
	} else {
		int srcu_idx;

		mmap_read_lock(vcpu->kvm->mm);
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		i = __do_essa(vcpu, orc);
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		mmap_read_unlock(vcpu->kvm->mm);
		if (i < 0)
			return i;
		/* Account for the possible extra cbrl entry */
		entries += i;
	}
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	mmap_read_lock(gmap->mm);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	mmap_read_unlock(gmap->mm);
	return 0;
}

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x8a:
	case 0x8e:
	case 0x8f:
		return handle_ipte_interlock(vcpu);
	case 0x8d:
		return handle_epsw(vcpu);
	case 0xab:
		return handle_essa(vcpu);
	case 0xaf:
		return handle_pfmf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

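/*
 * LOAD CONTROL: fetch the designated control registers from guest
 * memory. A TLB flush is requested afterwards, since control registers
 * may affect address translation.
 */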
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
	case 0x25:
		return handle_stctg(vcpu);
	case 0x2f:
		return handle_lctlg(vcpu);
	case 0x60:
	case 0x61:
	case 0x62:
		return handle_ri(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

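/*
 * TEST PROTECTION: translate the first operand and derive the
 * condition code from the translation result and the writability of
 * the backing host page. Only access key 0 is handled in the kernel;
 * everything else is passed to userspace.
 */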
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace.
	 */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x01:
		return handle_tprot(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	vcpu->stat.instruction_sckpf++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static int handle_ptff(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ptff++;

	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x04:
		return handle_ptff(vcpu);
	case 0x07:
		return handle_sckpf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}