#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>

#define OP_19_XOP_RFID          18
#define OP_19_XOP_RFI           50

#define OP_31_XOP_MFMSR         83
#define OP_31_XOP_MTMSR         146
#define OP_31_XOP_MTMSRD        178
#define OP_31_XOP_MTSR          210
#define OP_31_XOP_MTSRIN        242
#define OP_31_XOP_TLBIEL        274
#define OP_31_XOP_TLBIE         306
#define OP_31_XOP_SLBMTE        402
#define OP_31_XOP_SLBIE         434
#define OP_31_XOP_SLBIA         498
#define OP_31_XOP_MFSR          595
#define OP_31_XOP_MFSRIN        659
#define OP_31_XOP_DCBA          758
#define OP_31_XOP_SLBMFEV       851
#define OP_31_XOP_EIOIO         854
#define OP_31_XOP_SLBMFEE       915

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ          1010

#define OP_LFS                  48
#define OP_LFD                  50
#define OP_STFS                 52
#define OP_STFD                 54

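/*
 * Graphics Quantization Registers (GQRs). These SPRs are specific to the
 * Gekko/Broadway cores and feed the paired-single load/store emulation,
 * so they are defined locally here.
 */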
#define SPRN_GQR0               912
#define SPRN_GQR1               913
#define SPRN_GQR2               914
#define SPRN_GQR3               915
#define SPRN_GQR4               916
#define SPRN_GQR5               917
#define SPRN_GQR6               918
#define SPRN_GQR7               919

#undef mfsrin

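/*
 * Dispatch a privileged instruction that trapped into the hypervisor:
 * decode the primary opcode (plus the extended opcode for ops 19 and 31)
 * and emulate it against the shadowed guest state. *advance is cleared
 * whenever the guest PC must not be stepped past the instruction (rfi,
 * or a dcbz that faults and has to be replayed). Anything unhandled here
 * is passed on to the paired-single emulator.
 */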
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                           unsigned int inst, int *advance)
{
        int emulated = EMULATE_DONE;

        switch (get_op(inst)) {
        case 19:
                switch (get_xop(inst)) {
                case OP_19_XOP_RFID:
                case OP_19_XOP_RFI:
                        kvmppc_set_pc(vcpu, vcpu->arch.shared->srr0);
                        kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
                        *advance = 0;
                        break;

                default:
                        emulated = EMULATE_FAIL;
                        break;
                }
                break;
        case 31:
                switch (get_xop(inst)) {
                case OP_31_XOP_MFMSR:
                        kvmppc_set_gpr(vcpu, get_rt(inst),
                                       vcpu->arch.shared->msr);
                        break;
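                /*
                 * mtmsrd with L=1 (instruction bit 0x10000) updates only
                 * MSR[RI] and MSR[EE]; with L=0 the whole MSR is replaced
                 * through kvmppc_set_msr().
                 */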
                case OP_31_XOP_MTMSRD:
                {
                        ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
                        if (inst & 0x10000) {
                                vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
                                vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
                        } else
                                kvmppc_set_msr(vcpu, rs);
                        break;
                }
                case OP_31_XOP_MTMSR:
                        kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
                        break;
                case OP_31_XOP_MFSR:
                {
                        int srnum;

                        srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
                        if (vcpu->arch.mmu.mfsrin) {
                                u32 sr;
                                sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
                                kvmppc_set_gpr(vcpu, get_rt(inst), sr);
                        }
                        break;
                }
                case OP_31_XOP_MFSRIN:
                {
                        int srnum;

                        srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
                        if (vcpu->arch.mmu.mfsrin) {
                                u32 sr;
                                sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
                                kvmppc_set_gpr(vcpu, get_rt(inst), sr);
                        }
                        break;
                }
                case OP_31_XOP_MTSR:
                        vcpu->arch.mmu.mtsrin(vcpu,
                                (inst >> 16) & 0xf,
                                kvmppc_get_gpr(vcpu, get_rs(inst)));
                        break;
                case OP_31_XOP_MTSRIN:
                        vcpu->arch.mmu.mtsrin(vcpu,
                                (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
                                kvmppc_get_gpr(vcpu, get_rs(inst)));
                        break;
                case OP_31_XOP_TLBIE:
                case OP_31_XOP_TLBIEL:
                {
                        bool large = (inst & 0x00200000) ? true : false;
                        ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
                        vcpu->arch.mmu.tlbie(vcpu, addr, large);
                        break;
                }
                case OP_31_XOP_EIOIO:
                        /* No need to emulate: treated as a nop */
                        break;
                case OP_31_XOP_SLBMTE:
                        if (!vcpu->arch.mmu.slbmte)
                                return EMULATE_FAIL;

                        vcpu->arch.mmu.slbmte(vcpu,
                                        kvmppc_get_gpr(vcpu, get_rs(inst)),
                                        kvmppc_get_gpr(vcpu, get_rb(inst)));
                        break;
                case OP_31_XOP_SLBIE:
                        if (!vcpu->arch.mmu.slbie)
                                return EMULATE_FAIL;

                        vcpu->arch.mmu.slbie(vcpu,
                                        kvmppc_get_gpr(vcpu, get_rb(inst)));
                        break;
                case OP_31_XOP_SLBIA:
                        if (!vcpu->arch.mmu.slbia)
                                return EMULATE_FAIL;

                        vcpu->arch.mmu.slbia(vcpu);
                        break;
                case OP_31_XOP_SLBMFEE:
                        if (!vcpu->arch.mmu.slbmfee) {
                                emulated = EMULATE_FAIL;
                        } else {
                                ulong t, rb;

                                rb = kvmppc_get_gpr(vcpu, get_rb(inst));
                                t = vcpu->arch.mmu.slbmfee(vcpu, rb);
                                kvmppc_set_gpr(vcpu, get_rt(inst), t);
                        }
                        break;
                case OP_31_XOP_SLBMFEV:
                        if (!vcpu->arch.mmu.slbmfev) {
                                emulated = EMULATE_FAIL;
                        } else {
                                ulong t, rb;

                                rb = kvmppc_get_gpr(vcpu, get_rb(inst));
                                t = vcpu->arch.mmu.slbmfev(vcpu, rb);
                                kvmppc_set_gpr(vcpu, get_rt(inst), t);
                        }
                        break;
                case OP_31_XOP_DCBA:
                        /* Gets treated as NOP */
                        break;
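                /*
                 * dcbz zeroes one 32-byte line through kvmppc_st(). If
                 * the store misses the HTAB or hits a write-protected
                 * mapping, a data storage interrupt is queued with DAR
                 * and DSISR describing the faulting store, and the PC is
                 * left on the instruction so the guest can replay it.
                 */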
                case OP_31_XOP_DCBZ:
                {
                        ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
                        ulong ra = 0;
                        ulong addr, vaddr;
                        u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
                        u32 dsisr;
                        int r;

                        if (get_ra(inst))
                                ra = kvmppc_get_gpr(vcpu, get_ra(inst));

                        addr = (ra + rb) & ~31ULL;
                        if (!(vcpu->arch.shared->msr & MSR_SF))
                                addr &= 0xffffffff;
                        vaddr = addr;

                        r = kvmppc_st(vcpu, &addr, 32, zeros, true);
                        if ((r == -ENOENT) || (r == -EPERM)) {
                                *advance = 0;
                                vcpu->arch.shared->dar = vaddr;
                                to_svcpu(vcpu)->fault_dar = vaddr;

                                dsisr = DSISR_ISSTORE;
                                if (r == -ENOENT)
                                        dsisr |= DSISR_NOHPTE;
                                else if (r == -EPERM)
                                        dsisr |= DSISR_PROTFAULT;

                                vcpu->arch.shared->dsisr = dsisr;
                                to_svcpu(vcpu)->fault_dsisr = dsisr;

                                kvmppc_book3s_queue_irqprio(vcpu,
                                        BOOK3S_INTERRUPT_DATA_STORAGE);
                        }

                        break;
                }
                default:
                        emulated = EMULATE_FAIL;
                }
                break;
        default:
                emulated = EMULATE_FAIL;
        }

        if (emulated == EMULATE_FAIL)
                emulated = kvmppc_emulate_paired_single(run, vcpu);

        return emulated;
}

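/*
 * Decode one 32-bit half of a BAT register write into the shadow
 * struct kvmppc_bat: the upper word carries BEPI, the block length and
 * the Vs/Vp valid bits, the lower word BRPN, WIMG and PP.
 */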
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
                    u32 val)
{
        if (upper) {
                /* Upper BAT */
                u32 bl = (val >> 2) & 0x7ff;
                bat->bepi_mask = (~bl << 17);
                bat->bepi = val & 0xfffe0000;
                bat->vs = (val & 2) ? 1 : 0;
                bat->vp = (val & 1) ? 1 : 0;
                bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
        } else {
                /* Lower BAT */
                bat->brpn = val & 0xfffe0000;
                bat->wimg = (val >> 3) & 0xf;
                bat->pp = val & 3;
                bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
        }
}

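/* Map a BAT SPR number onto the corresponding shadow IBAT/DBAT entry. */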
static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        struct kvmppc_bat *bat;

        switch (sprn) {
        case SPRN_IBAT0U ... SPRN_IBAT3L:
                bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
                break;
        case SPRN_IBAT4U ... SPRN_IBAT7L:
                bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
                break;
        case SPRN_DBAT0U ... SPRN_DBAT3L:
                bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
                break;
        case SPRN_DBAT4U ... SPRN_DBAT7L:
                bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
                break;
        default:
                BUG();
        }

        return bat;
}

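/*
 * Emulate mtspr. Writes land in the vcpu's shadow copies; SPRs that only
 * matter on bare metal (thermal, cache and performance monitor control)
 * are accepted and silently dropped.
 */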
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
        int emulated = EMULATE_DONE;
        ulong spr_val = kvmppc_get_gpr(vcpu, rs);

        switch (sprn) {
        case SPRN_SDR1:
                to_book3s(vcpu)->sdr1 = spr_val;
                break;
        case SPRN_DSISR:
                vcpu->arch.shared->dsisr = spr_val;
                break;
        case SPRN_DAR:
                vcpu->arch.shared->dar = spr_val;
                break;
        case SPRN_HIOR:
                to_book3s(vcpu)->hior = spr_val;
                break;
        case SPRN_IBAT0U ... SPRN_IBAT3L:
        case SPRN_IBAT4U ... SPRN_IBAT7L:
        case SPRN_DBAT0U ... SPRN_DBAT3L:
        case SPRN_DBAT4U ... SPRN_DBAT7L:
        {
                struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

                kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);

                /* BAT writes happen so rarely that we're ok to flush
                 * everything here */
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
                break;
        }
        case SPRN_HID0:
                to_book3s(vcpu)->hid[0] = spr_val;
                break;
        case SPRN_HID1:
                to_book3s(vcpu)->hid[1] = spr_val;
                break;
        case SPRN_HID2:
                to_book3s(vcpu)->hid[2] = spr_val;
                break;
        case SPRN_HID2_GEKKO:
                to_book3s(vcpu)->hid[2] = spr_val;
                /* HID2.PSE controls paired single on gekko */
                switch (vcpu->arch.pvr) {
                /* Gekko/Broadway-family PVRs */
                case 0x00080200:
                case 0x00088202:
                case 0x70000100:
                case 0x00080100:
                case 0x00083203:
                case 0x00083213:
                case 0x00083204:
                case 0x00083214:
                case 0x00087200:
                        if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
                                /* Native paired singles */
                        } else if (spr_val & (1 << 29)) { /* HID2.PSE */
                                vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
                                kvmppc_giveup_ext(vcpu, MSR_FP);
                        } else {
                                vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
                        }
                        break;
                }
                break;
        case SPRN_HID4:
        case SPRN_HID4_GEKKO:
                to_book3s(vcpu)->hid[4] = spr_val;
                break;
        case SPRN_HID5:
                to_book3s(vcpu)->hid[5] = spr_val;
                /* guest HID5 set can change is_dcbz32 */
                if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                    (mfmsr() & MSR_HV))
                        vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
                break;
        case SPRN_GQR0:
        case SPRN_GQR1:
        case SPRN_GQR2:
        case SPRN_GQR3:
        case SPRN_GQR4:
        case SPRN_GQR5:
        case SPRN_GQR6:
        case SPRN_GQR7:
                to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
                break;
        case SPRN_ICTC:
        case SPRN_THRM1:
        case SPRN_THRM2:
        case SPRN_THRM3:
        case SPRN_CTRLF:
        case SPRN_CTRLT:
        case SPRN_L2CR:
        case SPRN_MMCR0_GEKKO:
        case SPRN_MMCR1_GEKKO:
        case SPRN_PMC1_GEKKO:
        case SPRN_PMC2_GEKKO:
        case SPRN_PMC3_GEKKO:
        case SPRN_PMC4_GEKKO:
        case SPRN_WPAR_GEKKO:
                break;
        default:
                printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
#ifndef DEBUG_SPR
                emulated = EMULATE_FAIL;
#endif
                break;
        }

        return emulated;
}

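/*
 * Emulate mfspr. Reads mirror the shadow state written above; the SPRs
 * that are ignored on write simply read back as zero.
 */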
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
        int emulated = EMULATE_DONE;

        switch (sprn) {
        case SPRN_IBAT0U ... SPRN_IBAT3L:
        case SPRN_IBAT4U ... SPRN_IBAT7L:
        case SPRN_DBAT0U ... SPRN_DBAT3L:
        case SPRN_DBAT4U ... SPRN_DBAT7L:
        {
                struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

                if (sprn % 2)
                        kvmppc_set_gpr(vcpu, rt, bat->raw >> 32);
                else
                        kvmppc_set_gpr(vcpu, rt, bat->raw);

                break;
        }
        case SPRN_SDR1:
                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
                break;
        case SPRN_DSISR:
                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr);
                break;
        case SPRN_DAR:
                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar);
                break;
        case SPRN_HIOR:
                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
                break;
        case SPRN_HID0:
                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
                break;
        case SPRN_HID1:
                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
                break;
        case SPRN_HID2:
        case SPRN_HID2_GEKKO:
                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
                break;
        case SPRN_HID4:
        case SPRN_HID4_GEKKO:
                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
                break;
        case SPRN_HID5:
                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
                break;
        case SPRN_GQR0:
        case SPRN_GQR1:
        case SPRN_GQR2:
        case SPRN_GQR3:
        case SPRN_GQR4:
        case SPRN_GQR5:
        case SPRN_GQR6:
        case SPRN_GQR7:
                kvmppc_set_gpr(vcpu, rt,
                               to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
                break;
        case SPRN_THRM1:
        case SPRN_THRM2:
        case SPRN_THRM3:
        case SPRN_CTRLF:
        case SPRN_CTRLT:
        case SPRN_L2CR:
        case SPRN_MMCR0_GEKKO:
        case SPRN_MMCR1_GEKKO:
        case SPRN_PMC1_GEKKO:
        case SPRN_PMC2_GEKKO:
        case SPRN_PMC3_GEKKO:
        case SPRN_PMC4_GEKKO:
        case SPRN_WPAR_GEKKO:
                kvmppc_set_gpr(vcpu, rt, 0);
                break;
        default:
                printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
#ifndef DEBUG_SPR
                emulated = EMULATE_FAIL;
#endif
                break;
        }

        return emulated;
}

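/*
 * Reconstruct the DSISR value an alignment interrupt would have set on
 * real hardware from the fields of the faulting instruction.
 */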
u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
        u32 dsisr = 0;

        /*
         * This is what the spec says about DSISR bits (not mentioned = 0):
         *
         * 12:13        [DS]    Set to bits 30:31
         * 15:16        [X]     Set to bits 29:30
         * 17           [X]     Set to bit 25
         *              [D/DS]  Set to bit 5
         * 18:21        [X]     Set to bits 21:24
         *              [D/DS]  Set to bits 1:4
         * 22:26                Set to bits 6:10 (RT/RS/FRT/FRS)
         * 27:31                Set to bits 11:15 (RA)
         */

        switch (get_op(inst)) {
        /* D-form */
        case OP_LFS:
        case OP_LFD:
        case OP_STFD:
        case OP_STFS:
                dsisr |= (inst >> 12) & 0x4000; /* bit 17 */
                dsisr |= (inst >> 17) & 0x3c00; /* bits 18:21 */
                break;
        /* X-form */
        case 31:
                dsisr |= (inst << 14) & 0x18000; /* bits 15:16 */
                dsisr |= (inst << 8)  & 0x04000; /* bit 17 */
                dsisr |= (inst << 3)  & 0x03c00; /* bits 18:21 */
                break;
        default:
                printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
                break;
        }

        dsisr |= (inst >> 16) & 0x03ff; /* bits 22:31 */

        return dsisr;
}

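/*
 * Compute the effective address (DAR) of an unaligned access: (RA|0)
 * plus the sign-extended D-form displacement, or plus RB for X-form.
 */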
ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
        ulong dar = 0;
        ulong ra;

        switch (get_op(inst)) {
        case OP_LFS:
        case OP_LFD:
        case OP_STFD:
        case OP_STFS:
                ra = get_ra(inst);
                if (ra)
                        dar = kvmppc_get_gpr(vcpu, ra);
                dar += (s32)((s16)inst);
                break;
        case 31:
                ra = get_ra(inst);
                if (ra)
                        dar = kvmppc_get_gpr(vcpu, ra);
                dar += kvmppc_get_gpr(vcpu, get_rb(inst));
                break;
        default:
                printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
                break;
        }

        return dar;
}