/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2014 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"
unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",       VCPU_STAT(mmio_exits) },
	{ "sig",        VCPU_STAT(signal_exits) },
	{ "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",       VCPU_STAT(syscall_exits) },
	{ "isi",        VCPU_STAT(isi_exits) },
	{ "dsi",        VCPU_STAT(dsi_exits) },
	{ "inst_emu",   VCPU_STAT(emulated_inst_exits) },
	{ "dec",        VCPU_STAT(dec_exits) },
	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};

void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.pc,
	       vcpu->arch.shared->msr);
	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif
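
/*
 * Note: SPE state is switched lazily.  kvmppc_vcpu_sync_spe() only loads
 * or flushes the SPE register file when the guest's view of MSR[SPE]
 * disagrees with what is currently loaded (tracked via shadow_msr), so
 * the expensive save/restore happens at most once per guest MSR change
 * rather than on every host/guest transition.
 */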

/*
 * Load up guest vcpu FP state if it's needed.
 * It also sets MSR_FP in the thread so the host knows we are holding
 * the FPU and can save the guest's FP state if another thread needs
 * the FPU.  This simulates an FP unavailable fault.
 *
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (!(current->thread.regs->msr & MSR_FP)) {
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		current->thread.fp_save_area = &vcpu->arch.fp;
		current->thread.regs->msr |= MSR_FP;
	}
#endif
}

/*
 * Save guest vcpu FP state into thread.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	current->thread.fp_save_area = NULL;
#endif
}

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

/*
 * Simulate AltiVec unavailable fault to load guest state
 * from thread to AltiVec unit.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (!(current->thread.regs->msr & MSR_VEC)) {
			enable_kernel_altivec();
			load_vr_state(&vcpu->arch.vr);
			disable_kernel_altivec();
			current->thread.vr_save_area = &vcpu->arch.vr;
			current->thread.regs->msr |= MSR_VEC;
		}
	}
#endif
}

/*
 * Save guest vcpu AltiVec state into thread.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		current->thread.vr_save_area = NULL;
	}
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}
261
262void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
263 ulong dear_flags, ulong esr_flags)
264{
265 vcpu->arch.queued_dear = dear_flags;
266 vcpu->arch.queued_esr = esr_flags;
267 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
268}
269
270void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
271 ulong dear_flags, ulong esr_flags)
272{
273 vcpu->arch.queued_dear = dear_flags;
274 vcpu->arch.queued_esr = esr_flags;
275 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
276}
277
278void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
279{
280 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
281}
282
283void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
284{
285 vcpu->arch.queued_esr = esr_flags;
286 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
287}
288
289static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
290 ulong esr_flags)
291{
292 vcpu->arch.queued_dear = dear_flags;
293 vcpu->arch.queued_esr = esr_flags;
294 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
295}
296
297void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
298{
299 vcpu->arch.queued_esr = esr_flags;
300 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
301}
302
303void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
304{
305 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
306}
307
308void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
309{
310 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
311}
312
313int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
314{
315 return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
316}
317
318void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
319{
320 clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
321}
322
323void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
324 struct kvm_interrupt *irq)
325{
326 unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;
327
328 if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
329 prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;
330
331 kvmppc_booke_queue_irqprio(vcpu, prio);
332}
333
334void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
335{
336 clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
337 clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
338}
339
340static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
341{
342 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
343}
344
345static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
346{
347 clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
348}
349
350void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
351{
352 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
353}
354
355void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
356{
357 clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
358}
359
360static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
361{
362 kvmppc_set_srr0(vcpu, srr0);
363 kvmppc_set_srr1(vcpu, srr1);
364}
365
366static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
367{
368 vcpu->arch.csrr0 = srr0;
369 vcpu->arch.csrr1 = srr1;
370}
371
372static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
373{
374 if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
375 vcpu->arch.dsrr0 = srr0;
376 vcpu->arch.dsrr1 = srr1;
377 } else {
378 set_guest_csrr(vcpu, srr0, srr1);
379 }
380}
381
382static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
383{
384 vcpu->arch.mcsrr0 = srr0;
385 vcpu->arch.mcsrr1 = srr1;
386}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
#ifdef CONFIG_SPE_POSSIBLE
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
#endif
#ifdef CONFIG_ALTIVEC
	case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
	case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
#endif
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
			int_class = INT_CLASS_DBG;
		else
			int_class = INT_CLASS_CRIT;

		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr)
			kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear)
			kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
		if (update_epr) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}
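
/*
 * Worked example (illustrative): delivering BOOKE_IRQPRIO_DTLB_MISS sets
 * update_dear/update_esr, so the queued DEAR/ESR values become guest
 * visible; SRR0/SRR1 capture the old pc/MSR; pc is redirected to
 * IVPR | IVOR[priority]; and the new MSR is masked with
 * MSR_CE | MSR_ME | MSR_DE, i.e. EE (and PR/IS/DS) are cleared while
 * critical, machine check and debug interrupts stay deliverable --
 * the Book E semantics of a non-critical interrupt.
 */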

/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}
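
/*
 * Worked example (illustrative): if TCR_GET_WP() returns 40 then
 * wdt_tb = 1ULL << 23, and the watchdog fires when timebase bit 23 next
 * toggles from 0 to 1: at most one full wdt_tb period (if the bit is
 * currently set) plus the distance to the next wdt_tb boundary, i.e.
 * under 2^24 ticks.  do_div() then rounds the tick count up to whole
 * jiffies.
 */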

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
	 * then do not run the watchdog timer as this can break timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

void kvmppc_watchdog_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Don't re-arm the timer after the final expiration; userspace
	 * must clear TSR[ENW]/TSR[WIS] (which re-arms via
	 * arm_next_watchdog()) before the watchdog starts counting
	 * again, preventing a storm of timer interrupts.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}
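
/*
 * Note: this implements the Book E three-stage watchdog.  The first
 * timer expiry sets TSR[ENW], the second sets TSR[WIS] (raising the
 * watchdog interrupt if TCR[WIE] allows), and an expiry with both bits
 * already set is "final": if TCR[WRC] requests an action and userspace
 * enabled the watchdog, a KVM_REQ_WATCHDOG request is raised and the
 * vcpu exits to userspace with KVM_EXIT_WATCHDOG (see
 * kvmppc_core_check_requests() below).
 */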

static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (kvm_request_pending(vcpu)) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}
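
/*
 * Note (assumed caller behaviour): a non-zero return value indicates
 * that state changed while preparing -- an exception delivery raised a
 * request, or we blocked on MSR[WE] -- so the generic entry path is
 * expected to re-run the prepare/deliver step before actually entering
 * the guest.
 */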

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct debug_reg debug;

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.
	 */
	kvmppc_load_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	/* Save userspace AltiVec state in stack */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_kernel_altivec();
	/*
	 * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
	 * as always using AltiVec.
	 */
	kvmppc_load_guest_altivec(vcpu);
#endif

	/* Switch to guest debug context */
	debug = vcpu->arch.dbg_reg;
	switch_booke_debug_regs(&debug);
	debug = current->thread.debug;
	current->thread.debug = vcpu->arch.dbg_reg;

	vcpu->arch.pgdir = current->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&debug);
	current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	kvmppc_save_guest_altivec(vcpu);
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}
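
/*
 * Note: RESUME_GUEST_NV differs from RESUME_GUEST by RESUME_FLAG_NV,
 * which asks the exit path to reload the non-volatile registers;
 * emulation may have modified them, as the comment in the EMULATE_DONE
 * case above anticipates optimizing away one day.
 */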

static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
	u32 dbsr = vcpu->arch.dbsr;

	if (vcpu->guest_debug == 0) {
		/*
		 * Debug resources belong to the guest.
		 * Imprecise debug events are not injected.
		 */
		if (dbsr & DBSR_IDE) {
			dbsr &= ~DBSR_IDE;
			if (!dbsr)
				return RESUME_GUEST;
		}

		if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
		    (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
			kvmppc_core_queue_debug(vcpu);

		/* Inject a program interrupt if trap debug is not allowed */
		if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
			kvmppc_core_queue_program(vcpu, ESR_PTR);

		return RESUME_GUEST;
	}

	/*
	 * Debug resource owned by userspace.
	 * Clear guest dbsr (vcpu->arch.dbsr).
	 */
	vcpu->arch.dbsr = 0;
	run->debug.arch.status = 0;
	run->debug.arch.address = vcpu->arch.pc;

	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
	} else {
		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
			run->debug.arch.address = dbg_reg->dac1;
		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
			run->debug.arch.address = dbg_reg->dac2;
	}

	return RESUME_HOST;
}

static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * Interrupts that need to be handled by the host are re-issued here, in
 * a similar (but not identical) way to how the low-level handlers call
 * them (e.g. from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_DOORBELL)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		kvmppc_fill_pt_regs(&regs);
		unknown_exception(&regs);
		break;
	case BOOKE_INTERRUPT_DEBUG:
		/* Save DBSR before preemption is enabled */
		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
		kvmppc_clear_dbsr();
		break;
	}
}

static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   enum emulation_result emulated, u32 last_inst)
{
	switch (emulated) {
	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		pr_debug("%s: load instruction from guest address %lx failed\n",
			 __func__, vcpu->arch.pc);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;
	int idx;
	u32 last_inst = KVM_INST_FETCH_FAILED;
	enum emulation_result emulated = EMULATE_DONE;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	/*
	 * Get the last instruction before being preempted.
	 */
	switch (exit_nr) {
	case BOOKE_INTERRUPT_DATA_STORAGE:
	case BOOKE_INTERRUPT_DTLB_MISS:
	case BOOKE_INTERRUPT_HV_PRIV:
		emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	case BOOKE_INTERRUPT_PROGRAM:
		/* SW breakpoints arrive as illegal instructions on HV */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	default:
		break;
	}

	trace_kvm_exit(exit_nr, vcpu);
	guest_exit_irqoff();

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	if (emulated != EMULATE_DONE) {
		r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst);
		goto out;
	}
	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set.  Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set.  Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
		    (last_inst == KVMPPC_INST_SW_BREAKPOINT)) {
			/*
			 * We are here because of an SW breakpoint instr,
			 * so lets return to host to handle.
			 */
			r = kvmppc_handle_debug(run, vcpu);
			run->exit_reason = KVM_EXIT_DEBUG;
			kvmppc_account_exit(vcpu, DEBUG_EXITS);
			break;
		}

		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#elif defined(CONFIG_SPE_POSSIBLE)
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	/*
	 * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
	 * see kvmppc_core_check_processor_compat().
	 */
#ifdef CONFIG_ALTIVEC
	case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALIGNMENT:
		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
					    vcpu->arch.fault_esr);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/*
			 * The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM.  This could be because the
			 * entry maps the host kernel, or because the guest
			 * used a large mapping which we're faking.  Either
			 * way, satisfy the fault directly via
			 * kvmppc_mmu_map() without involving the guest.
			 */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/*
			 * The guest TLB had a mapping, but the shadow TLB
			 * didn't.  This could be because the entry maps the
			 * host kernel, or because the guest used a large
			 * mapping which we're faking.  Either way, satisfy
			 * the fault directly via kvmppc_mmu_map() without
			 * involving the guest.
			 */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		r = kvmppc_handle_debug(run, vcpu);
		if (r == RESUME_HOST)
			run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

out:
	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
			kvmppc_load_guest_fp(vcpu);
			kvmppc_load_guest_altivec(vcpu);
		}
	}

	return r;
}
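
/*
 * Worked example (illustrative): if kvmppc_prepare_to_enter() above
 * returns a negative errno s (e.g. -EINTR for a pending signal),
 * (s << 2) | RESUME_HOST packs it in the form documented at
 * kvmppc_handle_exit(): errcode << 2 | RESUME_FLAG_HOST, with
 * RESUME_FLAG_NV preserved so non-volatile registers are still reloaded
 * on the way out.
 */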

static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
	u32 old_tsr = vcpu->arch.tsr;

	vcpu->arch.tsr = new_tsr;

	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* setup watchdog timer once */
	spin_lock_init(&vcpu->arch.wdt_lock);
	setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
		    (unsigned long)vcpu);

	/*
	 * Clear DBSR.MRR to avoid guest debug interrupt as
	 * this is of host interest.
	 */
	mtspr(SPRN_DBSR, DBSR_MRR);
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_pid(vcpu, regs->pid);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

static void get_sregs_base(struct kvm_vcpu *vcpu,
			   struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = kvmppc_get_esr(vcpu);
	sregs->u.e.dear = kvmppc_get_dar(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
			  struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	kvmppc_set_esr(vcpu, sregs->u.e.esr);
	kvmppc_set_dar(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
			      struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
			     struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
	return 0;
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_IAC1:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
		break;
	case KVM_REG_PPC_IAC2:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
		break;
	case KVM_REG_PPC_IAC4:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
		break;
	case KVM_REG_PPC_DAC2:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
		break;
	case KVM_REG_PPC_EPR: {
		u32 epr = kvmppc_get_epr(vcpu);
		*val = get_reg_val(id, epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR:
		*val = get_reg_val(id, vcpu->arch.epcr);
		break;
#endif
	case KVM_REG_PPC_TCR:
		*val = get_reg_val(id, vcpu->arch.tcr);
		break;
	case KVM_REG_PPC_TSR:
		*val = get_reg_val(id, vcpu->arch.tsr);
		break;
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
		break;
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_IAC1:
		vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAC2:
		vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAC4:
		vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DAC2:
		vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_EPR: {
		u32 new_epr = set_reg_val(id, *val);
		kvmppc_set_epr(vcpu, new_epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR: {
		u32 new_epcr = set_reg_val(id, *val);
		kvmppc_set_epcr(vcpu, new_epcr);
		break;
	}
#endif
	case KVM_REG_PPC_OR_TSR: {
		u32 tsr_bits = set_reg_val(id, *val);
		kvmppc_set_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_CLEAR_TSR: {
		u32 tsr_bits = set_reg_val(id, *val);
		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_TSR: {
		u32 tsr = set_reg_val(id, *val);
		kvmppc_set_tsr(vcpu, tsr);
		break;
	}
	case KVM_REG_PPC_TCR: {
		u32 tcr = set_reg_val(id, *val);
		kvmppc_set_tcr(vcpu, tcr);
		break;
	}
	case KVM_REG_PPC_VRSAVE:
		vcpu->arch.vrsave = set_reg_val(id, *val);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
		break;
	}

	return r;
}
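
/*
 * Illustrative (hypothetical userspace usage, not part of this file):
 * the accessors above back the generic ONE_REG interface, e.g. reading
 * the guest TCR through a vcpu file descriptor:
 *
 *	u64 tcr_val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_TCR,
 *		.addr = (__u64)(uintptr_t)&tcr_val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */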

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      const struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      const struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old,
				      const struct kvm_memory_slot *new)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
	vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}

void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * We may have stopped the watchdog due to
	 * being stuck on final expiration.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
				       uint64_t addr, int index)
{
	switch (index) {
	case 0:
		dbg_reg->dbcr0 |= DBCR0_IAC1;
		dbg_reg->iac1 = addr;
		break;
	case 1:
		dbg_reg->dbcr0 |= DBCR0_IAC2;
		dbg_reg->iac2 = addr;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 2:
		dbg_reg->dbcr0 |= DBCR0_IAC3;
		dbg_reg->iac3 = addr;
		break;
	case 3:
		dbg_reg->dbcr0 |= DBCR0_IAC4;
		dbg_reg->iac4 = addr;
		break;
#endif
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}

static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
				       int type, int index)
{
	switch (index) {
	case 0:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC1R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC1W;
		dbg_reg->dac1 = addr;
		break;
	case 1:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC2R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC2W;
		dbg_reg->dac2 = addr;
		break;
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}
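
/*
 * Illustrative usage: to arm one instruction breakpoint and one write
 * watchpoint, kvm_arch_vcpu_ioctl_set_guest_debug() below effectively
 * does (addresses hypothetical):
 *
 *	kvmppc_booke_add_breakpoint(dbg_reg, bp_addr, 0);
 *	kvmppc_booke_add_watchpoint(dbg_reg, wp_addr,
 *				    KVMPPC_DEBUG_WATCH_WRITE, 0);
 *
 * setting DBCR0[IAC1], DBCR0[DAC1W] and DBCR0[IDM] so the events fire
 * while the guest runs.
 */
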
void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
{
	/* XXX: Add similar MSR protection for BookE-PR */
#ifdef CONFIG_KVM_BOOKE_HV
	BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
	if (set) {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp |= MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp |= MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp |= MSRP_PMMP;
	} else {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp &= ~MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
	}
#endif
}

int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	int gtlb_index;
	gpa_t gpaddr;

#ifdef CONFIG_KVM_E500V2
	if (!(vcpu->arch.shared->msr & MSR_PR) &&
	    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
		pte->eaddr = eaddr;
		pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
			     (eaddr & ~PAGE_MASK);
		pte->vpage = eaddr >> PAGE_SHIFT;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;

		return 0;
	}
#endif

	/* Check the guest TLB. */
	switch (xlid) {
	case XLATE_INST:
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		break;
	case XLATE_DATA:
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		break;
	default:
		BUG();
	}

	/* Do we have a TLB entry at all? */
	if (gtlb_index < 0)
		return -ENOENT;

	gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);

	pte->eaddr = eaddr;
	pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
	pte->vpage = eaddr >> PAGE_SHIFT;

	/* XXX read permissions from the guest TLB */
	pte->may_read = true;
	pte->may_write = true;
	pte->may_execute = true;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	struct debug_reg *dbg_reg;
	int n, b = 0, w = 0;

	if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
		vcpu->arch.dbg_reg.dbcr0 = 0;
		vcpu->guest_debug = 0;
		kvm_guest_protect_msr(vcpu, MSR_DE, false);
		return 0;
	}

	kvm_guest_protect_msr(vcpu, MSR_DE, true);
	vcpu->guest_debug = dbg->control;
	vcpu->arch.dbg_reg.dbcr0 = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;

	/* Code below handles only HW breakpoints */
	dbg_reg = &(vcpu->arch.dbg_reg);

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * On BookE-HV (e500mc) the guest is always executed in guest state
	 * (MSR[GS] = 1), so the IAC/DAC events programmed here already
	 * apply only to the guest and need no user/supervisor
	 * qualification.
	 */
	dbg_reg->dbcr1 = 0;
	dbg_reg->dbcr2 = 0;
#else
	/*
	 * On BookE-PR the guest runs as a host user-space process, so
	 * qualify the IAC/DAC events to user state only; otherwise they
	 * would also trigger on host kernel addresses.
	 */
	dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
			 DBCR1_IAC4US;
	dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#endif

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		return 0;

	for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
		uint64_t addr = dbg->arch.bp[n].addr;
		uint32_t type = dbg->arch.bp[n].type;

		if (type == KVMPPC_DEBUG_NONE)
			continue;

		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
			     KVMPPC_DEBUG_WATCH_WRITE |
			     KVMPPC_DEBUG_BREAKPOINT))
			return -EINVAL;

		if (type & KVMPPC_DEBUG_BREAKPOINT) {
			/* Setting H/W breakpoint */
			if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
				return -EINVAL;
		} else {
			/* Setting H/W watchpoint */
			if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
							type, w++))
				return -EINVAL;
		}
	}

	return 0;
}
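
/*
 * Illustrative (hypothetical userspace usage, not part of this file):
 * enabling a software-breakpoint debug session on a vcpu fd:
 *
 *	struct kvm_guest_debug dbg = { 0 };
 *	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *
 * The guest then exits with KVM_EXIT_DEBUG when it hits the breakpoint
 * instruction advertised by KVM_REG_PPC_DEBUG_INST.
 */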

void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = smp_processor_id();
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
	vcpu->cpu = -1;

	/* Clear pending debug event in DBSR */
	kvmppc_clear_dbsr();
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return kvm->arch.kvm_ops->init_vm(kvm);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long *handler = kvmppc_booke_handler_addr;
	unsigned long max_ivor = 0;
	unsigned long handler_len;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR must
	 * be 16-bit aligned, so we need a 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}

	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);
#endif
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}