#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>

#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
#define OP_31_XOP_TLBIE		306

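/* Reserved extended opcode, used here to represent 'sc 1' PAPR hypercalls
 * (see the OP_31_XOP_FAKE_SC1 case below) */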
#define OP_31_XOP_FAKE_SC1	308
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915

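/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */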
#define OP_31_XOP_DCBZ		1010

#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

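/* Gekko/Broadway graphics quantization registers, used by the
 * paired-single load/store emulation */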
#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

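/* Book3S_32 headers define an mfsrin() of their own, which would clash
 * with the vcpu->arch.mmu.mfsrin function pointer calls below, so make
 * sure it is not a macro here. */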
#undef mfsrin

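/* Power privilege levels, lowest to highest: problem state (user),
 * supervisor, and hypervisor. */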
enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};

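/*
 * Check whether the guest may access an SPR of the given privilege
 * level: PAPR guests never see hypervisor SPRs, and problem-state code
 * is limited to problem-state SPRs.
 */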
static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
	if ((vcpu->arch.shared->msr & MSR_PR) && level > PRIV_PROBLEM)
		return false;

	return true;
}

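/*
 * Emulate a privileged or trapped instruction for a PR guest. Returns
 * EMULATE_DONE on success; *advance is cleared when emulation has
 * already set the new PC. Anything unhandled here is offered to the
 * paired-single emulator before failing.
 */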
int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);

	switch (get_op(inst)) {
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
		case OP_19_XOP_RFI:
			kvmppc_set_pc(vcpu, vcpu->arch.shared->srr0);
			kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
			*advance = 0;
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
			if (inst & 0x10000) {
				/* mtmsrd with L=1 only changes EE and RI */
				ulong new_msr = vcpu->arch.shared->msr;
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
				vcpu->arch.shared->msr = new_msr;
			} else
				kvmppc_set_msr(vcpu, rs_val);
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = kvmppc_get_gpr(vcpu, rb);
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
#ifdef CONFIG_PPC_BOOK3S_64
		case OP_31_XOP_FAKE_SC1:
		{
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

			if ((vcpu->arch.shared->msr & MSR_PR) ||
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}

			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			emulated = EMULATE_EXIT_USER;
			break;
		}
#endif
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
		case OP_31_XOP_DCBZ:
		{
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
			ulong addr, vaddr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
			u32 dsisr;
			int r;

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			addr = (ra_val + rb_val) & ~31ULL;
			if (!(vcpu->arch.shared->msr & MSR_SF))
				addr &= 0xffffffff;
			vaddr = addr;

			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
				vcpu->arch.shared->dar = vaddr;
				vcpu->arch.fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

				vcpu->arch.shared->dsisr = dsisr;
				vcpu->arch.fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(run, vcpu);

	return emulated;
}

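/*
 * Refresh the cached fields of a BAT from a 32-bit guest SPR write.
 * 'upper' selects the BATU (effective address/validity) half versus the
 * BATL (real address/protection) half; bat->raw keeps the unmodified
 * register pair so mfspr can return it verbatim.
 */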
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
		    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}

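/*
 * Map a BAT SPR number onto the vcpu's cached IBAT/DBAT array entry.
 * BATs come as upper/lower SPR pairs, hence the divide by two; any
 * other SPR is a caller bug.
 */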
static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	return bat;
}

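/*
 * Emulate mtspr for a PR guest. Privileged registers are gated by
 * spr_allowed(); writes to performance and thermal registers we do not
 * model are silently ignored.
 */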
int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		vcpu->arch.shared->dsisr = spr_val;
		break;
	case SPRN_DAR:
		vcpu->arch.shared->dar = spr_val;
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
		break;
	}
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		/* PVRs of the Gekko/Broadway family that implement
		 * paired singles */
		case 0x00080200:
		case 0x00088202:
		case 0x70000100:
		case 0x00080100:
		case 0x00083203:
		case 0x00083213:
		case 0x00083204:
		case 0x00083214:
		case 0x00087200:
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* guest HID5 set can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_PURR:
		to_book3s(vcpu)->purr_offset = spr_val - get_tb();
		break;
	case SPRN_SPURR:
		to_book3s(vcpu)->spurr_offset = spr_val - get_tb();
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_DSCR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
		break;
unprivileged:
	default:
		printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
#ifndef DEBUG_SPR
		emulated = EMULATE_FAIL;
#endif
		break;
	}

	return emulated;
}

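/*
 * Emulate mfspr for a PR guest. Emulated registers are read from the
 * shadow state, registers we do not model read as 0, and anything else
 * fails the emulation.
 */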
int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
			*spr_val = bat->raw >> 32;
		else
			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		*spr_val = vcpu->arch.shared->dsisr;
		break;
	case SPRN_DAR:
		*spr_val = vcpu->arch.shared->dar;
		break;
	case SPRN_HIOR:
		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_CFAR:
	case SPRN_DSCR:
		*spr_val = 0;
		break;
	case SPRN_PURR:
		*spr_val = get_tb() + to_book3s(vcpu)->purr_offset;
		break;
	case SPRN_SPURR:
		*spr_val = get_tb() + to_book3s(vcpu)->spurr_offset;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
		*spr_val = 0;
		break;
	default:
unprivileged:
		printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
#ifndef DEBUG_SPR
		emulated = EMULATE_FAIL;
#endif
		break;
	}

	return emulated;
}

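/*
 * Reconstruct the DSISR value a guest expects to see after an alignment
 * interrupt, using the instruction-derived bit layout documented below.
 */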
u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
	u32 dsisr = 0;

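	/*
	 * This is what the spec says about DSISR bits (not mentioned = 0):
	 *
	 * 15:16	[X]	Set to bits 29:30
	 * 17		[X]	Set to bit 25
	 *		[D/DS]	Set to bit 5
	 * 18:21	[X]	Set to bits 21:24
	 *		[D/DS]	Set to bits 1:4
	 * 22:26		Set to bits 6:10 (RT/RS/FRT/FRS)
	 * 27:31		Set to bits 11:15 (RA)
	 */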
	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		dsisr |= (inst >> 12) & 0x4000;	/* bit 17 */
		dsisr |= (inst >> 17) & 0x3c00; /* bits 18:21 */
		break;
	case 31:
		dsisr |= (inst << 14) & 0x18000; /* bits 15:16 */
		dsisr |= (inst << 8)  & 0x04000; /* bit 17 */
		dsisr |= (inst << 3)  & 0x03c00; /* bits 18:21 */
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	dsisr |= (inst >> 16) & 0x03ff; /* bits 22:31 */

	return dsisr;
}

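/*
 * Compute the effective address (DAR) of the access that caused an
 * alignment interrupt: (RA|0) + d for D-form floating-point accesses,
 * (RA|0) + (RB) for X-form accesses.
 */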
ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
	ulong dar = 0;
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += kvmppc_get_gpr(vcpu, rb);
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
}