/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "book3s.h"
#include "trace.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

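/*
 * Per-vcpu statistics, exported through the generic KVM debugfs code
 * (typically visible under /sys/kernel/debug/kvm when debugfs is
 * mounted).
 */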
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};

void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}

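/*
 * PR guests can relocate their interrupt vectors through the emulated
 * HIOR register; HV guests take interrupts through the real hardware
 * vectors, so no software offset applies there.
 */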
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		vcpu->arch.shared->int_pending = 1;
	else if (old_pending)
		vcpu->arch.shared->int_pending = 0;
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = vcpu->arch.shared->critical;
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	return crit;
}

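/*
 * Deliver an interrupt the way the architecture would: save the guest
 * PC and MSR (plus any status flags) into SRR0/SRR1, branch to the
 * vector (offset by HIOR for PR guests) and let the MMU callback put
 * the MSR into interrupt context.
 */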
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
	vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

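/*
 * Map an architected exception vector to the internal delivery
 * priority tracked in vcpu->arch.pending_exceptions.  Unrecognized
 * vectors map to BOOK3S_IRQPRIO_MAX and are never delivered.
 */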
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;	break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}

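/*
 * Queueing/dequeueing only flips a priority bit; actual delivery is
 * deferred to kvmppc_core_prepare_to_enter() on the next guest entry.
 */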
void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

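/*
 * Try to deliver a single pending exception.  Maskable sources
 * (decrementer, external) are held off while MSR_EE is clear or while
 * the guest is in a paravirt critical section.  Returns 1 if the
 * interrupt was injected.
 */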
int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines whether an irqprio should be cleared once
 * it has been delivered.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		/* External interrupts get cleared by userspace */
		return false;
	}

	return true;
}

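/*
 * Walk the pending_exceptions bitmap in priority order and inject the
 * first deliverable exception, clearing its bit unless the source is
 * level-triggered (see clear_irqprio() above).
 */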
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

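/*
 * Like gfn_to_pfn(), but with a detour for the paravirt magic page: a
 * guest frame that matches magic_page_pa is backed directly by the
 * host page holding the shared register area.
 */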
pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
			bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(vcpu->arch.shared->msr & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	if (unlikely(mp_pa) &&
	    unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
		     ((mp_pa & PAGE_MASK) & KVM_PAM))) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		pfn_t pfn;

		pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);

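/*
 * Translate a guest effective address, honouring the current MSR
 * IR/DR relocation bits.  In real mode the address is used as-is,
 * clamped by KVM_PAM, with full access rights.
 */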
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
			bool iswrite, struct kvmppc_pte *pte)
{
	int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;
	}

	return r;
}

static hva_t kvmppc_bad_hva(void)
{
	return PAGE_OFFSET;
}

static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool read)
{
	hva_t hpage;

	if (read && !pte->may_read)
		goto err;

	if (!read && !pte->may_write)
		goto err;

	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpage))
		goto err;

	return hpage | (pte->raddr & ~PAGE_MASK);
err:
	return kvmppc_bad_hva();
}

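/*
 * kvmppc_st()/kvmppc_ld() below store to / load from a guest effective
 * address, returning EMULATE_DO_MMIO when the address has no RAM
 * backing so the caller can bounce the access to userspace.
 */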
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;

	vcpu->stat.st++;

	if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))
		return -ENOENT;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;
	hva_t hva = *eaddr;

	vcpu->stat.ld++;

	if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
		goto nopte;

	*eaddr = pte.raddr;

	hva = kvmppc_pte_to_hva(vcpu, &pte, true);
	if (kvm_is_error_hva(hva))
		goto mmio;

	if (copy_from_user(ptr, (void __user *)hva, size)) {
		printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
		goto mmio;
	}

	return EMULATE_DONE;

nopte:
	return -ENOENT;
mmio:
	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.shared->sprg4;
	regs->sprg5 = vcpu->arch.shared->sprg5;
	regs->sprg6 = vcpu->arch.shared->sprg6;
	regs->sprg7 = vcpu->arch.shared->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

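/*
 * ONE_REG accessors.  The subarch (PR or HV) gets first shot through
 * kvm_ops; registers it does not know about fall through to the
 * common handling here.  A minimal (hypothetical) userspace caller
 * might look like:
 *
 *	__u64 dar;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_DAR,
 *		.addr = (__u64)(unsigned long)&dar,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */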
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;
	long int i;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
		case KVM_REG_PPC_DAR:
			val = get_reg_val(reg->id, vcpu->arch.shared->dar);
			break;
		case KVM_REG_PPC_DSISR:
			val = get_reg_val(reg->id, vcpu->arch.shared->dsisr);
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = reg->id - KVM_REG_PPC_FPR0;
			val = get_reg_val(reg->id, vcpu->arch.fpr[i]);
			break;
		case KVM_REG_PPC_FPSCR:
			val = get_reg_val(reg->id, vcpu->arch.fpscr);
			break;
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		case KVM_REG_PPC_DEBUG_INST: {
			u32 opcode = INS_TW;
			r = copy_to_user((u32 __user *)(long)reg->addr,
					 &opcode, sizeof(u32));
			break;
		}
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		default:
			r = -EINVAL;
			break;
		}
	}
	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;
	long int i;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
		case KVM_REG_PPC_DAR:
			vcpu->arch.shared->dar = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_DSISR:
			vcpu->arch.shared->dsisr = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = reg->id - KVM_REG_PPC_FPR0;
			vcpu->arch.fpr[i] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fpscr = set_reg_val(reg->id, val);
			break;
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			r = kvmppc_xics_set_icp(vcpu,
						set_reg_val(reg->id, val));
			break;
#endif /* CONFIG_KVM_XICS */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

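/*
 * From here on down, most entry points simply dispatch through
 * kvm->arch.kvm_ops, which points at either the PR or the HV
 * implementation depending on how the VM was created.
 */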
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

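/*
 * Timer callback for the emulated decrementer: queue a DEC exception
 * and kick the vcpu so it notices on its way back into the guest.
 */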
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->age_hva(kvm, hva);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 here: actual processor compatibility is
	 * checked when the HV or PR module is loaded.
	 */
	return 0;
}

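/*
 * Module init/exit.  When 32-bit PR KVM is built into this module
 * (CONFIG_KVM_BOOK3S_32), its setup runs directly from here; on
 * 64-bit, the PR and HV variants are separate modules with their own
 * init paths that register their kvm_ops.
 */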
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32
	r = kvmppc_book3s_init_pr();
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);