// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/xive.h>

#include "book3s.h"
#include "trace.h"

/* #define EXIT_DEBUG */

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_ICOUNTER(VM, num_2M_pages),
	STATS_DESC_ICOUNTER(VM, num_1G_pages)
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, sum_exits),
	STATS_DESC_COUNTER(VCPU, mmio_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, light_exits),
	STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
	STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
	STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
	STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
	STATS_DESC_COUNTER(VCPU, syscall_exits),
	STATS_DESC_COUNTER(VCPU, isi_exits),
	STATS_DESC_COUNTER(VCPU, dsi_exits),
	STATS_DESC_COUNTER(VCPU, emulated_inst_exits),
	STATS_DESC_COUNTER(VCPU, dec_exits),
	STATS_DESC_COUNTER(VCPU, ext_intr_exits),
	STATS_DESC_COUNTER(VCPU, halt_successful_wait),
	STATS_DESC_COUNTER(VCPU, dbell_exits),
	STATS_DESC_COUNTER(VCPU, gdbell_exits),
	STATS_DESC_COUNTER(VCPU, ld),
	STATS_DESC_COUNTER(VCPU, st),
	STATS_DESC_COUNTER(VCPU, pf_storage),
	STATS_DESC_COUNTER(VCPU, pf_instruc),
	STATS_DESC_COUNTER(VCPU, sp_storage),
	STATS_DESC_COUNTER(VCPU, sp_instruc),
	STATS_DESC_COUNTER(VCPU, queue_intr),
	STATS_DESC_COUNTER(VCPU, ld_slow),
	STATS_DESC_COUNTER(VCPU, st_slow),
	STATS_DESC_COUNTER(VCPU, pthru_all),
	STATS_DESC_COUNTER(VCPU, pthru_host),
	STATS_DESC_COUNTER(VCPU, pthru_bad_aff)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

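/*
 * Illustrative note (not part of the original file): userspace reads the
 * descriptors and headers above through the binary stats interface. The
 * fd returned by ioctl(vm_fd, KVM_GET_STATS_FD, NULL) (or the vcpu
 * equivalent) yields the header first, then the id string, the descriptor
 * array and the data block, at the offsets the header advertises. A
 * minimal reader sketch, assuming "fd" is such a stats fd:
 *
 *	struct kvm_stats_header hdr;
 *	size_t desc_size;
 *
 *	pread(fd, &hdr, sizeof(hdr), 0);
 *	desc_size = sizeof(struct kvm_stats_desc) + hdr.name_size;
 *	// one desc_size record per descriptor at hdr.desc_offset;
 *	// the u64 data values start at hdr.data_offset.
 */
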
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags);
}

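/*
 * Background note (informational, hedged): book3s is a thin dispatcher over
 * two backends. kvm->arch.kvm_ops is chosen at VM creation and points at
 * either the HV implementation (kvm_ops_hv in book3s_hv.c) or the PR
 * implementation (kvm_ops_pr in book3s_pr.c), so wrappers like the one
 * above are plain indirect calls into whichever backend owns this VM.
 */
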
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

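/*
 * How queueing relates to delivery (background sketch): queueing only sets
 * a BOOK3S_IRQPRIO_* bit in vcpu->arch.pending_exceptions; the exception is
 * actually injected later, when kvmppc_core_prepare_to_enter() scans the
 * bitmap on the way back into the guest. For example (illustrative only):
 *
 *	kvmppc_core_queue_dec(vcpu);
 *	// sets BOOK3S_IRQPRIO_DECREMENTER; vector 0x900 is injected at the
 *	// next guest entry, once MSR_EE and the critical-section check allow
 */
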
void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	/*
	 * This case (KVM_INTERRUPT_SET) should never actually arise for
	 * a pseries guest (because pseries guests expect their interrupt
	 * controllers to continue asserting an external interrupt request
	 * until it is acknowledged at the interrupt controller), but is
	 * included to avoid ABI breakage and potentially for other
	 * sorts of guest.
	 *
	 * There is a subtlety here: HV KVM does not test the
	 * external_oneshot flag in the code that synthesizes
	 * external interrupts for the guest just before entering
	 * the guest.  That is OK even if userspace did do a
	 * KVM_INTERRUPT_SET on a pseries guest vcpu, because the
	 * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick(),
	 * which pulls the guest all the way out to the host, meaning
	 * that we will call kvmppc_core_prepare_to_enter() before
	 * entering the guest again, and that will handle the
	 * external_oneshot flag correctly.
	 */
	if (irq->irq == KVM_INTERRUPT_SET)
		vcpu->arch.external_oneshot = 1;

	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);

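/*
 * Usage sketch (illustrative): callers that want the guest to observe a
 * data storage interrupt must supply the architected DAR/DSISR state
 * first, e.g. for a protection fault on effective address "ea"
 * (DSISR_PROTFAULT is defined in asm/reg.h):
 *
 *	kvmppc_core_queue_data_storage(vcpu, ea, DSISR_PROTFAULT);
 *
 * Unlike the bitmap-queued exceptions above, DSI/ISI are injected
 * immediately through the backend's inject_interrupt() hook.
 */
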
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL:
		/*
		 * External interrupts get cleared by userspace
		 * except when set by the KVM_INTERRUPT ioctl with
		 * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL).
		 */
		if (vcpu->arch.external_oneshot) {
			vcpu->arch.external_oneshot = 0;
			return true;
		}
		return false;
	}

	return true;
}

int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

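/*
 * Delivery-order note (background): __ffs()/find_next_bit() walk
 * pending_exceptions from the lowest set bit upwards, so numerically lower
 * BOOK3S_IRQPRIO_* values are considered first, and the loop stops as soon
 * as one exception has been both delivered and cleared. Whatever remains
 * pending afterwards is advertised to a PR guest through the magic-page
 * int_pending hint via kvmppc_update_int_pending().
 */
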
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			    bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);

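/*
 * Magic-page note (background): PR guests using the KVM paravirt interface
 * register a "magic page" whose guest-physical address is kept in
 * vcpu->arch.magic_page_pa. The override above resolves that GPA to the
 * host page backing vcpu->arch.shared rather than ordinary guest memory,
 * so guest accesses to shared registers (SRR0/SRR1 etc.) avoid an exit.
 */
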
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

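/*
 * Example (illustrative): with the relevant relocation bit clear, the
 * "translation" above is just the identity map clipped to KVM_PAM, e.g.
 *
 *	struct kvmppc_pte pte;
 *
 *	kvmppc_xlate(vcpu, 0x3000, XLATE_INST, XLATE_READ, &pte);
 *	// pte.raddr == 0x3000, pte.vpage == VSID_REAL | 0x3,
 *	// may_read/may_write/may_execute all true
 *
 * With MSR_IR/MSR_DR set, the per-MMU xlate() callback walks the guest's
 * segment and page-table state instead.
 */
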
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
			  enum instruction_fetch_type type, u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

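/*
 * Note (background): for a system call the guest's PC has already advanced
 * past the trapping instruction, so INST_SC fetches from pc - 4 to load
 * the "sc" that caused the exit; all other fetch types read from PC.
 */
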
int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
			else
				*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_get_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
			else
				r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_set_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

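/*
 * Userspace view (illustrative sketch): these helpers back the
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls. Reading the TAR register
 * from userspace might look like:
 *
 *	__u64 tar;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_TAR,
 *		.addr = (__u64)&tar,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * The backend's get_one_reg()/set_one_reg() gets first refusal; only ids
 * it rejects with -EINVAL fall through to the common switch above.
 */
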
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu_load(vcpu);
	vcpu->guest_debug = dbg->control;
	vcpu_put(vcpu);
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm->arch.kvm_ops->free_memslot(slot);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      const struct kvm_userspace_memory_region *mem,
				      enum kvm_mr_change change)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem,
							change);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      const struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old,
				      const struct kvm_memory_slot *new,
				      enum kvm_mr_change change)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
			unsigned flags)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
	return 0;
}

int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
	mutex_init(&kvm->arch.rtas_token_lock);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

#ifdef CONFIG_KVM_XICS
	/*
	 * Free the XIVE and XICS devices which are not directly freed by the
	 * device 'release' method.
	 */
	kfree(kvm->arch.xive_devices.native);
	kvm->arch.xive_devices.native = NULL;
	kfree(kvm->arch.xive_devices.xics_on_xive);
	kvm->arch.xive_devices.xics_on_xive = NULL;
	kfree(kvm->arch.xics_device);
	kvm->arch.xics_device = NULL;
#endif /* CONFIG_KVM_XICS */
}

int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);

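/*
 * Hypercall convention sketch (per PAPR, hedged): H_LOGICAL_CI_LOAD takes
 * the access size in r4 and the logical address of a cache-inhibited page
 * in r5, and on H_SUCCESS returns the zero-extended value in r4 -- which
 * is why the code above reads gprs 4/5 and writes gpr 4. H_TOO_HARD is the
 * in-kernel "punt" status that sends the hypercall out for emulation
 * elsewhere (ultimately userspace) instead of failing it outright.
 */
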
int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * or PR module.
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

#ifdef CONFIG_KVM_XICS
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	if (xics_on_xive())
		return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
	else
		return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}

static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
				 struct kvm *kvm, int irq_source_id, int level,
				 bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = kvmppc_book3s_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

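/*
 * Routing note (background, hedged): book3s models a single flat irqchip,
 * so a GSI is used directly as the interrupt source number. The identity
 * entry built above simply routes kvm_set_irq() to the XICS or XIVE
 * backend selected for this VM; there is no pin remapping, as
 * kvm_irq_map_chip_pin() below also shows.
 */
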
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}

#endif /* CONFIG_KVM_XICS */

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
	if (xics_on_xive()) {
		kvmppc_xive_init_module();
		kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
		if (kvmppc_xive_native_supported()) {
			kvmppc_xive_native_init_module();
			kvm_register_device_ops(&kvm_xive_native_ops,
						KVM_DEV_TYPE_XIVE);
		}
	} else
#endif
		kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_XICS
	if (xics_on_xive()) {
		kvmppc_xive_exit_module();
		kvmppc_xive_native_exit_module();
	}
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif