#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

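/*
 * Lazily enable runtime instrumentation (facility 64) for the guest and
 * retry the intercepted instruction, or inject an operation exception if
 * the facility is not available to this VM.
 */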
static int handle_ri(struct kvm_vcpu *vcpu)
{
	if (test_kvm_facility(vcpu->kvm, 64)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}

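/*
 * Lazily enable the guarded-storage facility (facility 133): load the
 * guest's GS control block on the host, flag guarded storage in the SIE
 * block and retry the intercepted instruction.
 */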
static int handle_gs(struct kvm_vcpu *vcpu)
{
	if (test_kvm_facility(vcpu->kvm, 133)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
		preempt_disable();
		__ctl_set_bit(2, 4);
		current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
		restore_gs_cb(current->thread.gs_cb);
		preempt_enable();
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
	int code = vcpu->arch.sie_block->ipb & 0xff;

	if (code == 0x49 || code == 0x4d)
		return handle_gs(vcpu);
	else
		return -EOPNOTSUPP;
}

static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	int rc;
	u8 ar;
	u64 op2, val;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
	kvm_s390_set_tod_clock(vcpu->kvm, val);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word aligned */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are
	 * always at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word aligned */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* store the current prefix value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

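/*
 * Enable storage-key handling for the guest on first use: convert the
 * backing pages with s390_enable_skey() and then stop intercepting the
 * storage-key instructions (or leave keyless-subset mode if it was active).
 */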
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc = 0;
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

	trace_kvm_s390_skey_related_inst(vcpu);
	if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
	    !(atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS))
		return rc;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (!rc) {
		if (atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS)
			atomic_andnot(CPUSTAT_KSS, &sie_block->cpuflags);
		else
			sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE |
					     ICTL_RRBE);
	}
	return rc;
}

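/*
 * Common preamble for the storage-key instruction handlers: make sure key
 * handling is enabled, let the SIE interpret the instruction itself when
 * the storage-key facility is available (-EAGAIN), and check for privilege.
 */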
static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_storage_key++;
	rc = kvm_s390_skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (sclp.has_skey) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	return 0;
}

static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	unsigned char key;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = get_guest_storage_key(current->mm, addr, &key);
	up_read(&current->mm->mmap_sem);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = reset_guest_reference_bit(current->mm, addr);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
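/*
 * Handle SET STORAGE KEY EXTENDED. The m3 bits that correspond to
 * facilities not available to the guest are ignored; with the
 * multiple-block control set, keys are set for a whole 1MB block and
 * reg2 is advanced past the processed range.
 */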
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));

		if (kvm_is_error_hva(addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		down_read(&current->mm->mmap_sem);
		rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);
		up_read(&current->mm->mmap_sem);
		if (rc < 0)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

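/*
 * Handle TEST BLOCK: validate the addressed page and clear it, then
 * report condition code 0 and a zero in register 0.
 */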
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

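/*
 * Handle TEST PENDING INTERRUPTION: dequeue a pending I/O interrupt
 * matching CR6 and store its interruption code either at the supplied
 * address or in the lowcore; reinject the interrupt if storing fails.
 */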
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24;	/* all iscs set */

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

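/*
 * Handle STORE FACILITY LIST: store the first 32 facility bits of the
 * guest's CPU model into the lowcore.
 */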
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

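/*
 * Check architectural validity of a PSW: no unassigned mask bits set, an
 * instruction address that fits the addressing mode and is halfword
 * aligned, and no invalid addressing-mode combination.
 */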
int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

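/*
 * Handle STORE SYSTEM INFORMATION: function codes 1 and 2 are satisfied
 * from the host's stsi data, function code 3-2-2 is synthesized for the
 * KVM hypervisor level, and the result may additionally be handed to
 * userspace if it requested STSI exits.
 */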
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1:
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x14] = kvm_s390_handle_vsie,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_iske,
	[0x2a] = handle_rrbe,
	[0x2b] = handle_sske,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x56] = handle_sthyi,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones, that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED 0xfffc0101UL
#define PFMF_SK 0x00020000UL
#define PFMF_CF 0x00010000UL
#define PFMF_UI 0x00008000UL
#define PFMF_FSC 0x00007000UL
#define PFMF_NQ 0x00000800UL
#define PFMF_MR 0x00000400UL
#define PFMF_MC 0x00000200UL
#define PFMF_KEY 0x000000feUL
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long useraddr;

		/* Translate guest address to host address */
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = kvm_s390_skey_check_enable(vcpu);

			if (rc)
				return rc;
			down_read(&current->mm->mmap_sem);
			rc = cond_set_guest_storage_key(current->mm, useraddr,
							key, NULL, nq, mr, mc);
			up_read(&current->mm->mmap_sem);
			if (rc < 0)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}

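/*
 * Perform the actual ESSA processing for one page: query/set the page and
 * pgste state, build the result for reg1, append the frame to the CBRL
 * list if requested, and mark the page in the CMMA migration bitmap while
 * migration is in progress.
 */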
static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
{
	struct kvm_s390_migration_state *ms = vcpu->kvm->arch.migration_state;
	int r1, r2, nappended, entries;
	unsigned long gfn, hva, res, pgstev, ptev;
	unsigned long *cbrlo;

	/*
	 * We don't need to set SD.FPF.SK to 1 here, because if we have a
	 * machine check here we either handle it or crash
	 */

	kvm_s390_get_regs_rre(vcpu, &r1, &r2);
	gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
	hva = gfn_to_hva(vcpu->kvm, gfn);
	entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;

	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
	if (nappended < 0) {
		res = orc ? 0x10 : 0;
		vcpu->run->s.regs.gprs[r1] = res;
		return 0;
	}
	res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22;
	/*
	 * Set the block-content state part of the result. 0 means resident, so
	 * nothing to do if the page is valid. 2 is for preserved pages
	 * (non-present and non-zero), and 3 for zero pages (non-present and
	 * zero).
	 */
	if (ptev & _PAGE_INVALID) {
		res |= 2;
		if (pgstev & _PGSTE_GPS_ZERO)
			res |= 1;
	}
	vcpu->run->s.regs.gprs[r1] = res;
	/*
	 * It is possible that all the normal 511 slots were full, in which case
	 * we will now write in the 512th slot, which is reserved for host use.
	 * In both cases we let the normal essa handling code process all the
	 * slots, including the reserved one, if needed.
	 */
	if (nappended > 0) {
		cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
		cbrlo[entries] = gfn << PAGE_SHIFT;
	}

	if (orc) {
		/* increment only if we are really flipping the bit to 1 */
		if (!test_and_set_bit(gfn, ms->pgste_bitmap))
			atomic64_inc(&ms->dirty_pages);
	}

	return nappended;
}

static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i, orc;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* the orc is in bits 32-35 of the instruction */
	orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	if (orc > ESSA_MAX)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (likely(!vcpu->kvm->arch.migration_state)) {
		/*
		 * CMMA is enabled in the KVM settings, but is disabled in
		 * the SIE block and in the mm_context, and we are not doing
		 * a migration. Enable CMMA in the mm_context.
		 * Since we need to take a write lock to write to the context
		 * to avoid races with storage keys handling, we check if the
		 * value really needs to be written to; if the value is
		 * already correct, we do nothing and avoid the lock.
		 */
		if (vcpu->kvm->mm->context.use_cmma == 0) {
			down_write(&vcpu->kvm->mm->mmap_sem);
			vcpu->kvm->mm->context.use_cmma = 1;
			up_write(&vcpu->kvm->mm->mmap_sem);
		}
		/*
		 * If we are here, we are supposed to have CMMA enabled in
		 * the SIE block. Enabling CMMA works on a per-CPU basis,
		 * while the context use_cmma flag is per process.
		 * It's possible that the context flag is enabled and the
		 * SIE flag is not, so we set the flag always; if it was
		 * already set, nothing changes, otherwise we enable it
		 * on this CPU too.
		 */
		vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		/* retry the instruction with CMMA now enabled */
		kvm_s390_retry_instr(vcpu);
	} else {
		/* account for the possible extra cbrl entry */
		i = do_essa(vcpu, orc);
		if (i < 0)
			return i;
		entries += i;
	}
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
	[0x60] = handle_ri,
	[0x61] = handle_ri,
	[0x62] = handle_ri,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

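/*
 * Handle TEST PROTECTION: translate the operand address for store and, on
 * protection exceptions, for fetch access, and derive the condition code
 * from the translation result and the writability of the backing host page.
 */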
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace.
	 */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static int handle_ptff(struct kvm_vcpu *vcpu)
{
	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x04] = handle_ptff,
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}
1361