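/*
 * Basic KVM support on Itanium (ia64) series processors with VT-i
 * hardware virtualization.
 */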
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/bitops.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

#include <asm/pgtable.h>
#include <asm/gcc_intrin.h>
#include <asm/pal.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/tlb.h>
#include <asm/elf.h>
#include <asm/sn/addrs.h>
#include <asm/sn/clksupport.h>
#include <asm/sn/shub_mmr.h>

#include "misc.h"
#include "vti.h"
#include "iodev.h"
#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

static unsigned long kvm_vmm_base;
static unsigned long kvm_vsa_base;
static unsigned long kvm_vm_buffer;
static unsigned long kvm_vm_buffer_size;
unsigned long kvm_vmm_gp;

static long vp_env_info;

static struct kvm_vmm_info *kvm_vmm_info;

static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ NULL }
};

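/*
 * Current time source: on SGI SN2 the chipset RTC is read (the ITC is
 * not usable as a global time base there); everywhere else the ar.itc
 * register is used.
 */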
static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	if (vcpu->kvm->arch.is_sn2)
		return rtc_time();
	else
#endif
		return ia64_getreg(_IA64_REG_AR_ITC);
}

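/* Flush the icache for [start, start + len); fc works on 32-byte lines. */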
static void kvm_flush_icache(unsigned long start, unsigned long len)
{
	int l;

	for (l = 0; l < (len + 32); l += 32)
		ia64_fc((void *)(start + l));

	ia64_sync_i();
	ia64_srlz_i();
}

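/*
 * Purge the local TLB with a ptc.e loop over the base/count/stride
 * parameters PAL reported for this cpu.
 */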
static void kvm_flush_tlb_all(void)
{
	unsigned long i, j, count0, count1, stride0, stride1, addr;
	long flags;

	addr = local_cpu_data->ptce_base;
	count0 = local_cpu_data->ptce_count[0];
	count1 = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();
}

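/* PAL_VP_CREATE: register the VPD and host IVA for this virtual processor. */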
long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
{
	struct ia64_pal_retval iprv;

	PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
			(u64)opt_handler);

	return iprv.status;
}

static DEFINE_SPINLOCK(vp_lock);

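/*
 * Per-cpu VT enable: temporarily pin the VMM area, then ask PAL to
 * initialize the VP environment. The first cpu through here latches
 * the VSA base that PAL hands back.
 */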
int kvm_arch_hardware_enable(void *garbage)
{
	long status;
	long tmp_base;
	unsigned long pte;
	unsigned long saved_psr;
	int slot;

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return -EINVAL;

	spin_lock(&vp_lock);
	status = ia64_pal_vp_init_env(kvm_vsa_base ?
			VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
			__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
	if (status != 0) {
		spin_unlock(&vp_lock);
		printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
		return -EINVAL;
	}

	if (!kvm_vsa_base) {
		kvm_vsa_base = tmp_base;
		printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
	}
	spin_unlock(&vp_lock);
	ia64_ptr_entry(0x3, slot);

	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
	long status;
	int slot;
	unsigned long pte;
	unsigned long saved_psr;
	unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
				PAGE_KERNEL));

	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	status = ia64_pal_vp_exit_env(host_iva);
	if (status)
		printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
				status);
	ia64_ptr_entry(0x3, slot);
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IRQ_INJECT_STATUS:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_IOMMU:
		r = iommu_found();
		break;
	default:
		r = 0;
	}
	return r;
}

static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	kvm_run->hw.hardware_exit_reason = 1;
	return 0;
}

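/*
 * MMIO exits: accesses hitting the in-kernel ioapic page are completed
 * on the KVM_MMIO_BUS right away; anything else is handed to userspace
 * as a KVM_EXIT_MMIO.
 */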
static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct kvm_mmio_req *p;
	int r;

	p = kvm_get_vcpu_ioreq(vcpu);

	if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
		goto mmio;
	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
	vcpu->mmio_size = kvm_run->mmio.len = p->size;
	vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;

	if (vcpu->mmio_is_write)
		memcpy(vcpu->mmio_data, &p->data, p->size);
	memcpy(kvm_run->mmio.data, &p->data, p->size);
	kvm_run->exit_reason = KVM_EXIT_MMIO;
	return 0;
mmio:
	if (p->dir)
		r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr,
				p->size, &p->data);
	else
		r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr,
				p->size, &p->data);
	if (r)
		printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
	p->state = STATE_IORESP_READY;

	return 1;
}

static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_PAL_CALL)
		return kvm_pal_emul(vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 2;
		return 0;
	}
}

static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
		kvm_sal_emul(vcpu);
		return 1;
	} else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 3;
		return 0;
	}
}

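/* Queue a vector in the VPD irr; returns 1 if it was newly pending. */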
static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (!test_and_set_bit(vector, &vpd->irr[0])) {
		vcpu->arch.irq_new_pending = 1;
		kvm_vcpu_kick(vcpu);
		return 1;
	}
	return 0;
}

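/*
 * Translate the SAPIC delivery mode into the vector actually injected:
 * NMI becomes vector 2, ExtINT vector 0; INIT and PMI delivery are not
 * implemented.
 */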
static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
			uint64_t vector)
{
	switch (dm) {
	case SAPIC_FIXED:
		break;
	case SAPIC_NMI:
		vector = 2;
		break;
	case SAPIC_EXTINT:
		vector = 0;
		break;
	case SAPIC_INIT:
	case SAPIC_PMI:
	default:
		printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
		return;
	}
	__apic_accept_irq(vcpu, vector);
}

static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
			unsigned long eid)
{
	union ia64_lid lid;
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		lid.val = VCPU_LID(vcpu);
		if (lid.id == id && lid.eid == eid)
			return vcpu;
	}

	return NULL;
}

static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm_vcpu *target_vcpu;
	struct kvm_pt_regs *regs;
	union ia64_ipi_a addr = p->u.ipi_data.addr;
	union ia64_ipi_d data = p->u.ipi_data.data;

	target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
	if (!target_vcpu)
		return handle_vm_error(vcpu, kvm_run);

	if (!target_vcpu->arch.launched) {
		regs = vcpu_regs(target_vcpu);

		regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
		regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;

		target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		if (waitqueue_active(&target_vcpu->wq))
			wake_up_interruptible(&target_vcpu->wq);
	} else {
		vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
		if (target_vcpu != vcpu)
			kvm_vcpu_kick(target_vcpu);
	}

	return 1;
}

struct call_data {
	struct kvm_ptc_g ptc_g_data;
	struct kvm_vcpu *vcpu;
};

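/*
 * Runs on each target cpu via smp_call_function_single(): queue the
 * ptc.g request on that vcpu, or fall back to a full TLB flush when the
 * per-vcpu queue overflows.
 */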
static void vcpu_global_purge(void *info)
{
	struct call_data *p = (struct call_data *)info;
	struct kvm_vcpu *vcpu = p->vcpu;

	if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
		return;

	set_bit(KVM_REQ_PTC_G, &vcpu->requests);
	if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
		vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
							p->ptc_g_data;
	} else {
		clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
		vcpu->arch.ptc_g_count = 0;
		set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
	}
}

static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm *kvm = vcpu->kvm;
	struct call_data call_data;
	int i;
	struct kvm_vcpu *vcpui;

	call_data.ptc_g_data = p->u.ptc_g_data;

	kvm_for_each_vcpu(i, vcpui, kvm) {
		if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
				vcpu == vcpui)
			continue;

		if (waitqueue_active(&vcpui->wq))
			wake_up_interruptible(&vcpui->wq);

		if (vcpui->cpu != -1) {
			call_data.vcpu = vcpui;
			smp_call_function_single(vcpui->cpu,
					vcpu_global_purge, &call_data, 1);
		} else
			printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
	}
	return 1;
}

static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	return 1;
}

static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
{
	unsigned long pte, rtc_phys_addr, map_addr;
	int slot;

	map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT);
	rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
	pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
	slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
	vcpu->arch.sn_rtc_tr_slot = slot;
	if (slot < 0) {
		printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n");
		slot = 0;
	}
	return slot;
}

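/*
 * Emulated halt: compute how far the guest's timer match (itm) is from
 * its current virtual ITC, arm an hrtimer for that point, and block the
 * vcpu until the timer fires or something else makes it runnable.
 */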
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	ktime_t kt;
	long itc_diff;
	unsigned long vcpu_now_itc;
	unsigned long expires;
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
	unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (irqchip_in_kernel(vcpu->kvm)) {

		vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;

		if (time_after(vcpu_now_itc, vpd->itm)) {
			vcpu->arch.timer_check = 1;
			return 1;
		}
		itc_diff = vpd->itm - vcpu_now_itc;
		if (itc_diff < 0)
			itc_diff = -itc_diff;

		expires = div64_u64(itc_diff, cyc_per_usec);
		kt = ktime_set(0, 1000 * expires);

		vcpu->arch.ht_active = 1;
		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);

		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		hrtimer_cancel(p_ht);
		vcpu->arch.ht_active = 0;

		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) ||
				kvm_cpu_has_pending_timer(vcpu))
			if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
				vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		printk(KERN_ERR"kvm: Unsupported userspace halt!");
		return 0;
	}
}

static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int handle_external_interrupt(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	return 1;
}

static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	printk("VMM: %s", vcpu->arch.log_buf);
	return 1;
}

static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run) = {
	[EXIT_REASON_VM_PANIC] = handle_vm_error,
	[EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio,
	[EXIT_REASON_PAL_CALL] = handle_pal_call,
	[EXIT_REASON_SAL_CALL] = handle_sal_call,
	[EXIT_REASON_SWITCH_RR6] = handle_switch_rr6,
	[EXIT_REASON_VM_DESTROY] = handle_vm_shutdown,
	[EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
	[EXIT_REASON_IPI] = handle_ipi,
	[EXIT_REASON_PTC_G] = handle_global_purge,
	[EXIT_REASON_DEBUG] = handle_vcpu_debug,
};

static const int kvm_vti_max_exit_handlers =
		sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);

static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p_exit_data;

	p_exit_data = kvm_get_exit_data(vcpu);
	return p_exit_data->exit_reason;
}

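/* Dispatch one VMM exit; unknown exit reasons are punted to userspace. */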
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	u32 exit_reason = kvm_get_exit_reason(vcpu);
	vcpu->arch.last_exit = exit_reason;

	if (exit_reason < kvm_vti_max_exit_handlers
			&& kvm_vti_exit_handlers[exit_reason])
		return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_reason;
	}
	return 0;
}

static inline void vti_set_rr6(unsigned long rr6)
{
	ia64_set_rr(RR6, rr6);
	ia64_srlz_i();
}

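/*
 * Pin translations for the VMM text and this VM's data area (plus the
 * SN2 RTC page where needed) before transferring to the VMM.
 */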
static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
{
	unsigned long pte;
	struct kvm *kvm = vcpu->kvm;
	int r;

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vmm_tr_slot = r;

	pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
				pte, KVM_VM_DATA_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vm_tr_slot = r;

#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	if (kvm->arch.is_sn2) {
		r = kvm_sn2_setup_mappings(vcpu);
		if (r < 0)
			goto out;
	}
#endif

	r = 0;
out:
	return r;
}

static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
	ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	if (kvm->arch.is_sn2)
		ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot);
#endif
}

static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
{
	unsigned long psr;
	int r;
	int cpu = smp_processor_id();

	if (vcpu->arch.last_run_cpu != cpu ||
			per_cpu(last_vcpu, cpu) != vcpu) {
		per_cpu(last_vcpu, cpu) = vcpu;
		vcpu->arch.last_run_cpu = cpu;
		kvm_flush_tlb_all();
	}

	vcpu->arch.host_rr6 = ia64_get_rr(RR6);
	vti_set_rr6(vcpu->arch.vmm_rr);
	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	local_irq_restore(psr);
	return r;
}

static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
{
	kvm_purge_vmm_mapping(vcpu);
	vti_set_rr6(vcpu->arch.host_rr6);
}

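/*
 * The run loop: switch rr6 and the pinned mappings over to the VMM,
 * enter the guest through the trampoline, and on return dispatch the
 * exit until userspace attention is required.
 */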
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	union context *host_ctx, *guest_ctx;
	int r, idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

again:
	if (signal_pending(current)) {
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		goto out;
	}

	preempt_disable();
	local_irq_disable();

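	/* Pick up the host and guest context areas in the guest address space. */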
	host_ctx = kvm_get_host_context(vcpu);
	guest_ctx = kvm_get_guest_context(vcpu);

	clear_bit(KVM_REQ_KICK, &vcpu->requests);

	r = kvm_vcpu_pre_transition(vcpu);
	if (r < 0)
		goto vcpu_run_fail;

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	kvm_guest_enter();

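	/* Transition to the guest through the relocated VMM trampoline. */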
	kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);

	kvm_vcpu_post_transition(vcpu);

	vcpu->arch.launched = 1;
	set_bit(KVM_REQ_KICK, &vcpu->requests);
	local_irq_enable();

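	/*
	 * Keep at least a compiler barrier between local_irq_enable() and
	 * kvm_guest_exit() so that a pending timer interrupt is not
	 * delayed past the guest-time accounting by the interrupt shadow.
	 */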
	barrier();
	kvm_guest_exit();
	preempt_enable();

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	r = kvm_handle_exit(kvm_run, vcpu);

	if (r > 0) {
		if (!need_resched())
			goto again;
	}

out:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	if (r > 0) {
		kvm_resched(vcpu);
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		goto again;
	}

	return r;

vcpu_run_fail:
	local_irq_enable();
	preempt_enable();
	kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
	goto out;
}

static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
{
	struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);

	if (!vcpu->mmio_is_write)
		memcpy(&p->data, vcpu->mmio_data, 8);
	p->state = STATE_IORESP_READY;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		r = -EAGAIN;
		goto out;
	}

	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		kvm_set_mmio_data(vcpu);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
	}
	r = __vcpu_run(vcpu, kvm_run);
out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

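/*
 * The entire VM (struct kvm, vcpu structs, VHPT/VTLB, dirty log) lives
 * in one physically contiguous KVM_VM_DATA_SIZE block, so the VMM can
 * map it with a single pinned translation.
 */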
struct kvm *kvm_arch_alloc_vm(void)
{
	struct kvm *kvm;
	uint64_t vm_base;

	BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);

	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
	if (!vm_base)
		return NULL;

	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
	kvm = (struct kvm *)(vm_base +
			offsetof(struct kvm_vm_data, kvm_vm_struct));
	kvm->arch.vm_base = vm_base;
	printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);

	return kvm;
}

struct kvm_io_range {
	unsigned long start;
	unsigned long size;
	unsigned long type;
};

static const struct kvm_io_range io_ranges[] = {
	{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
	{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
	{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
	{IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
	{PIB_START, PIB_SIZE, GPFN_PIB},
};

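/* Tag the well-known platform I/O ranges in the guest's pmt. */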
static void kvm_build_io_pmt(struct kvm *kvm)
{
	unsigned long i, j;

	for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
							i++) {
		for (j = io_ranges[i].start;
				j < io_ranges[i].start + io_ranges[i].size;
				j += PAGE_SIZE)
			kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
					io_ranges[i].type, 0);
	}
}

#define GUEST_PHYSICAL_RR0	0x1739
#define GUEST_PHYSICAL_RR4	0x2739
#define VMM_INIT_RR		0x1660

int kvm_arch_init_vm(struct kvm *kvm)
{
	BUG_ON(!kvm);

	kvm->arch.is_sn2 = ia64_platform_is("sn2");

	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
	kvm->arch.vmm_init_rr = VMM_INIT_RR;

	kvm_build_io_pmt(kvm);

	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

	return 0;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
		struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_IOAPIC:
		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_IOAPIC:
		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
	int i;

	for (i = 0; i < 16; i++) {
		vpd->vgr[i] = regs->vpd.vgr[i];
		vpd->vbgr[i] = regs->vpd.vbgr[i];
	}
	for (i = 0; i < 128; i++)
		vpd->vcr[i] = regs->vpd.vcr[i];
	vpd->vhpi = regs->vpd.vhpi;
	vpd->vnat = regs->vpd.vnat;
	vpd->vbnat = regs->vpd.vbnat;
	vpd->vpsr = regs->vpd.vpsr;
	vpd->vpr = regs->vpd.vpr;

	memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));

	RESTORE_REGS(mp_state);
	RESTORE_REGS(vmm_rr);
	memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
	memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
	RESTORE_REGS(itr_regions);
	RESTORE_REGS(dtr_regions);
	RESTORE_REGS(tc_regions);
	RESTORE_REGS(irq_check);
	RESTORE_REGS(itc_check);
	RESTORE_REGS(timer_check);
	RESTORE_REGS(timer_pending);
	RESTORE_REGS(last_itc);
	for (i = 0; i < 8; i++) {
		vcpu->arch.vrr[i] = regs->vrr[i];
		vcpu->arch.ibr[i] = regs->ibr[i];
		vcpu->arch.dbr[i] = regs->dbr[i];
	}
	for (i = 0; i < 4; i++)
		vcpu->arch.insvc[i] = regs->insvc[i];
	RESTORE_REGS(xtp);
	RESTORE_REGS(metaphysical_rr0);
	RESTORE_REGS(metaphysical_rr4);
	RESTORE_REGS(metaphysical_saved_rr0);
	RESTORE_REGS(metaphysical_saved_rr4);
	RESTORE_REGS(fp_psr);
	RESTORE_REGS(saved_gp);

	vcpu->arch.irq_new_pending = 1;
	vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
	set_bit(KVM_REQ_RESUME, &vcpu->requests);

	return 0;
}

920
921long kvm_arch_vm_ioctl(struct file *filp,
922 unsigned int ioctl, unsigned long arg)
923{
924 struct kvm *kvm = filp->private_data;
925 void __user *argp = (void __user *)arg;
926 int r = -ENOTTY;
927
928 switch (ioctl) {
929 case KVM_SET_MEMORY_REGION: {
930 struct kvm_memory_region kvm_mem;
931 struct kvm_userspace_memory_region kvm_userspace_mem;
932
933 r = -EFAULT;
934 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
935 goto out;
936 kvm_userspace_mem.slot = kvm_mem.slot;
937 kvm_userspace_mem.flags = kvm_mem.flags;
938 kvm_userspace_mem.guest_phys_addr =
939 kvm_mem.guest_phys_addr;
940 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
941 r = kvm_vm_ioctl_set_memory_region(kvm,
942 &kvm_userspace_mem, 0);
943 if (r)
944 goto out;
945 break;
946 }
947 case KVM_CREATE_IRQCHIP:
948 r = -EFAULT;
949 r = kvm_ioapic_init(kvm);
950 if (r)
951 goto out;
952 r = kvm_setup_default_irq_routing(kvm);
953 if (r) {
954 mutex_lock(&kvm->slots_lock);
955 kvm_ioapic_destroy(kvm);
956 mutex_unlock(&kvm->slots_lock);
957 goto out;
958 }
959 break;
960 case KVM_IRQ_LINE_STATUS:
961 case KVM_IRQ_LINE: {
962 struct kvm_irq_level irq_event;
963
964 r = -EFAULT;
965 if (copy_from_user(&irq_event, argp, sizeof irq_event))
966 goto out;
967 r = -ENXIO;
968 if (irqchip_in_kernel(kvm)) {
969 __s32 status;
970 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
971 irq_event.irq, irq_event.level);
972 if (ioctl == KVM_IRQ_LINE_STATUS) {
973 r = -EFAULT;
974 irq_event.status = status;
975 if (copy_to_user(argp, &irq_event,
976 sizeof irq_event))
977 goto out;
978 }
979 r = 0;
980 }
981 break;
982 }
983 case KVM_GET_IRQCHIP: {
984
985 struct kvm_irqchip chip;
986
987 r = -EFAULT;
988 if (copy_from_user(&chip, argp, sizeof chip))
989 goto out;
990 r = -ENXIO;
991 if (!irqchip_in_kernel(kvm))
992 goto out;
993 r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
994 if (r)
995 goto out;
996 r = -EFAULT;
997 if (copy_to_user(argp, &chip, sizeof chip))
998 goto out;
999 r = 0;
1000 break;
1001 }
1002 case KVM_SET_IRQCHIP: {
1003
1004 struct kvm_irqchip chip;
1005
1006 r = -EFAULT;
1007 if (copy_from_user(&chip, argp, sizeof chip))
1008 goto out;
1009 r = -ENXIO;
1010 if (!irqchip_in_kernel(kvm))
1011 goto out;
1012 r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
1013 if (r)
1014 goto out;
1015 r = 0;
1016 break;
1017 }
1018 default:
1019 ;
1020 }
1021out:
1022 return r;
1023}
1024
1025int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1026 struct kvm_sregs *sregs)
1027{
1028 return -EINVAL;
1029}
1030
1031int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1032 struct kvm_sregs *sregs)
1033{
1034 return -EINVAL;
1035
1036}
1037int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1038 struct kvm_translation *tr)
1039{
1040
1041 return -EINVAL;
1042}
1043
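/*
 * Reserve the VMM area (relocated module text plus the PAL VP
 * environment buffer) once, at module load time.
 */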
static int kvm_alloc_vmm_area(void)
{
	if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
		kvm_vmm_base = __get_free_pages(GFP_KERNEL,
				get_order(KVM_VMM_SIZE));
		if (!kvm_vmm_base)
			return -ENOMEM;

		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;

		printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
				kvm_vmm_base, kvm_vm_buffer);
	}

	return 0;
}

static void kvm_free_vmm_area(void)
{
	if (kvm_vmm_base) {
		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
		kvm_vmm_base = 0;
		kvm_vm_buffer = 0;
		kvm_vsa_base = 0;
	}
}

static int vti_init_vpd(struct kvm_vcpu *vcpu)
{
	int i;
	union cpuid3_t cpuid3;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (IS_ERR(vpd))
		return PTR_ERR(vpd);

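	/* Mirror the host cpuid registers 0-4 into the VPD. */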
	for (i = 0; i < 5; i++)
		vpd->vcpuid[i] = ia64_get_cpuid(i);

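	/* Cap the largest implemented cpuid register index the guest sees at 4. */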
	cpuid3.value = vpd->vcpuid[3];
	cpuid3.number = 4;
	vpd->vcpuid[3] = cpuid3.value;

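	/* Virtualization acceleration (vac) and disable (vdc) controls. */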
	vpd->vac.a_from_int_cr = 1;
	vpd->vac.a_to_int_cr = 1;
	vpd->vac.a_from_psr = 1;
	vpd->vac.a_from_cpuid = 1;
	vpd->vac.a_cover = 1;
	vpd->vac.a_bsw = 1;
	vpd->vac.a_int = 1;
	vpd->vdc.d_vmsw = 1;

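	/* Address of the per-VM virtual environment buffer handed to PAL. */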
	vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;

	return 0;
}

static int vti_create_vp(struct kvm_vcpu *vcpu)
{
	long ret;
	struct vpd *vpd = vcpu->arch.vpd;
	unsigned long vmm_ivt;

	vmm_ivt = kvm_vmm_info->vmm_ivt;

	printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);

	ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);

	if (ret) {
		printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
		return -EINVAL;
	}
	return 0;
}

static void init_ptce_info(struct kvm_vcpu *vcpu)
{
	ia64_ptce_info_t ptce = {0};

	ia64_get_ptce(&ptce);
	vcpu->arch.ptce_base = ptce.base;
	vcpu->arch.ptce_count[0] = ptce.count[0];
	vcpu->arch.ptce_count[1] = ptce.count[1];
	vcpu->arch.ptce_stride[0] = ptce.stride[0];
	vcpu->arch.ptce_stride[1] = ptce.stride[1];
}

static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;

	if (hrtimer_cancel(p_ht))
		hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS);
}

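/* hlt-timer expiry: wake the halted vcpu and flag that its timer fired. */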
static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
{
	struct kvm_vcpu *vcpu;
	wait_queue_head_t *q;

	vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
	q = &vcpu->wq;

	if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
		goto out;

	if (waitqueue_active(q))
		wake_up_interruptible(q);

out:
	vcpu->arch.timer_fired = 1;
	vcpu->arch.timer_check = 1;
	return HRTIMER_NORESTART;
}

#define PALE_RESET_ENTRY	0x80000000ffffffb0UL

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *v;
	int r;
	int i;
	long itc_offset;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	union context *p_ctx = &vcpu->arch.guest;
	struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);

	if (IS_ERR(vmm_vcpu))
		return PTR_ERR(vmm_vcpu);

	if (kvm_vcpu_is_bsp(vcpu)) {
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

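		/* The BSP starts fetching at the PAL reset entry point. */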
		regs->cr_iip = PALE_RESET_ENTRY;

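		/* Start guest time at zero: same ITC offset for every vcpu. */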
		itc_offset = 0UL - kvm_get_itc(vcpu);
		for (i = 0; i < KVM_MAX_VCPUS; i++) {
			v = (struct kvm_vcpu *)((char *)vcpu +
					sizeof(struct kvm_vcpu_data) * i);
			v->arch.itc_offset = itc_offset;
			v->arch.last_itc = 0;
		}
	} else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	r = -ENOMEM;
	vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
	if (!vcpu->arch.apic)
		goto out;
	vcpu->arch.apic->vcpu = vcpu;

	p_ctx->gr[1] = 0;
	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
	p_ctx->gr[13] = (unsigned long)vmm_vcpu;
	p_ctx->psr = 0x1008522000UL;
	p_ctx->ar[40] = FPSR_DEFAULT;
	p_ctx->caller_unat = 0;
	p_ctx->pr = 0x0;
	p_ctx->ar[36] = 0x0;
	p_ctx->ar[19] = 0x0;
	p_ctx->ar[18] = (unsigned long)vmm_vcpu +
			((sizeof(struct kvm_vcpu)+15) & ~15);
	p_ctx->ar[64] = 0x0;
	p_ctx->cr[0] = 0x7e04UL;
	p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
	p_ctx->cr[8] = 0x3c;

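	/* Region registers for the initial VMM context. */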
	p_ctx->rr[0] = 0x30;
	p_ctx->rr[1] = 0x30;
	p_ctx->rr[2] = 0x30;
	p_ctx->rr[3] = 0x30;
	p_ctx->rr[4] = 0x30;
	p_ctx->rr[5] = 0x30;
	p_ctx->rr[7] = 0x30;

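	/* b0 gets the entry point out of the vmm_entry function descriptor. */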
	p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;

	vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
	vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
	vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;

	hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	vcpu->arch.hlt_timer.function = hlt_timer_fn;

	vcpu->arch.last_run_cpu = -1;
	vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
	vcpu->arch.vsa_base = kvm_vsa_base;
	vcpu->arch.__gp = kvm_vmm_gp;
	vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
	init_ptce_info(vcpu);

	r = 0;
out:
	return r;
}

static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
{
	unsigned long psr;
	int r;

	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	local_irq_restore(psr);
	if (r)
		goto fail;

	r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
	if (r)
		goto fail;

	r = vti_init_vpd(vcpu);
	if (r) {
		printk(KERN_DEBUG"kvm: vpd init error!!\n");
		goto uninit;
	}

	r = vti_create_vp(vcpu);
	if (r)
		goto uninit;

	kvm_purge_vmm_mapping(vcpu);

	return 0;
uninit:
	kvm_vcpu_uninit(vcpu);
fail:
	return r;
}

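/*
 * vcpus are carved out of the VM's contiguous data area rather than
 * allocated separately; only the placement and setup happen here.
 */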
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
		unsigned int id)
{
	struct kvm_vcpu *vcpu;
	unsigned long vm_base = kvm->arch.vm_base;
	int r;
	int cpu;

	BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);

	r = -EINVAL;
	if (id >= KVM_MAX_VCPUS) {
		printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
				KVM_MAX_VCPUS);
		goto fail;
	}

	r = -ENOMEM;
	if (!vm_base) {
		printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
		goto fail;
	}
	vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
					vcpu_data[id].vcpu_struct));
	vcpu->kvm = kvm;

	cpu = get_cpu();
	r = vti_vcpu_setup(vcpu, id);
	put_cpu();

	if (r) {
		printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
		goto fail;
	}

	return vcpu;
fail:
	return ERR_PTR(r);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
		struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

void kvm_arch_free_vm(struct kvm *kvm)
{
	unsigned long vm_base = kvm->arch.vm_base;

	if (vm_base) {
		memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
		free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
	}
}

static void kvm_release_vm_pages(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i, j;

	slots = kvm_memslots(kvm);
	for (i = 0; i < slots->nmemslots; i++) {
		memslot = &slots->memslots[i];

		for (j = 0; j < memslot->npages; j++) {
			if (memslot->rmap[j])
				put_page((struct page *)memslot->rmap[j]);
		}
	}
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
	kvm_free_all_assigned_devices(kvm);
#endif
	kfree(kvm->arch.vioapic);
	kvm_release_vm_pages(kvm);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	if (cpu != vcpu->cpu) {
		vcpu->cpu = cpu;
		if (vcpu->arch.ht_active)
			kvm_migrate_hlt_timer(vcpu);
	}
}

#define SAVE_REGS(_x) regs->_x = vcpu->arch._x

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < 16; i++) {
		regs->vpd.vgr[i] = vpd->vgr[i];
		regs->vpd.vbgr[i] = vpd->vbgr[i];
	}
	for (i = 0; i < 128; i++)
		regs->vpd.vcr[i] = vpd->vcr[i];
	regs->vpd.vhpi = vpd->vhpi;
	regs->vpd.vnat = vpd->vnat;
	regs->vpd.vbnat = vpd->vbnat;
	regs->vpd.vpsr = vpd->vpsr;
	regs->vpd.vpr = vpd->vpr;

	memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));

	SAVE_REGS(mp_state);
	SAVE_REGS(vmm_rr);
	memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
	memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
	SAVE_REGS(itr_regions);
	SAVE_REGS(dtr_regions);
	SAVE_REGS(tc_regions);
	SAVE_REGS(irq_check);
	SAVE_REGS(itc_check);
	SAVE_REGS(timer_check);
	SAVE_REGS(timer_pending);
	SAVE_REGS(last_itc);
	for (i = 0; i < 8; i++) {
		regs->vrr[i] = vcpu->arch.vrr[i];
		regs->ibr[i] = vcpu->arch.ibr[i];
		regs->dbr[i] = vcpu->arch.dbr[i];
	}
	for (i = 0; i < 4; i++)
		regs->insvc[i] = vcpu->arch.insvc[i];
	regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu);
	SAVE_REGS(xtp);
	SAVE_REGS(metaphysical_rr0);
	SAVE_REGS(metaphysical_rr4);
	SAVE_REGS(metaphysical_saved_rr0);
	SAVE_REGS(metaphysical_saved_rr4);
	SAVE_REGS(fp_psr);
	SAVE_REGS(saved_gp);

	vcpu_put(vcpu);
	return 0;
}

1462
1463int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
1464 struct kvm_ia64_vcpu_stack *stack)
1465{
1466 memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
1467 return 0;
1468}
1469
1470int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
1471 struct kvm_ia64_vcpu_stack *stack)
1472{
1473 memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
1474 sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));
1475
1476 vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
1477 return 0;
1478}
1479
1480void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
1481{
1482
1483 hrtimer_cancel(&vcpu->arch.hlt_timer);
1484 kfree(vcpu->arch.apic);
1485}
1486
1487
1488long kvm_arch_vcpu_ioctl(struct file *filp,
1489 unsigned int ioctl, unsigned long arg)
1490{
1491 struct kvm_vcpu *vcpu = filp->private_data;
1492 void __user *argp = (void __user *)arg;
1493 struct kvm_ia64_vcpu_stack *stack = NULL;
1494 long r;
1495
1496 switch (ioctl) {
1497 case KVM_IA64_VCPU_GET_STACK: {
1498 struct kvm_ia64_vcpu_stack __user *user_stack;
1499 void __user *first_p = argp;
1500
1501 r = -EFAULT;
1502 if (copy_from_user(&user_stack, first_p, sizeof(void *)))
1503 goto out;
1504
1505 if (!access_ok(VERIFY_WRITE, user_stack,
1506 sizeof(struct kvm_ia64_vcpu_stack))) {
1507 printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
1508 "Illegal user destination address for stack\n");
1509 goto out;
1510 }
1511 stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
1512 if (!stack) {
1513 r = -ENOMEM;
1514 goto out;
1515 }
1516
1517 r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
1518 if (r)
1519 goto out;
1520
1521 if (copy_to_user(user_stack, stack,
1522 sizeof(struct kvm_ia64_vcpu_stack))) {
1523 r = -EFAULT;
1524 goto out;
1525 }
1526
1527 break;
1528 }
1529 case KVM_IA64_VCPU_SET_STACK: {
1530 struct kvm_ia64_vcpu_stack __user *user_stack;
1531 void __user *first_p = argp;
1532
1533 r = -EFAULT;
1534 if (copy_from_user(&user_stack, first_p, sizeof(void *)))
1535 goto out;
1536
1537 if (!access_ok(VERIFY_READ, user_stack,
1538 sizeof(struct kvm_ia64_vcpu_stack))) {
1539 printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
1540 "Illegal user address for stack\n");
1541 goto out;
1542 }
1543 stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
1544 if (!stack) {
1545 r = -ENOMEM;
1546 goto out;
1547 }
1548 if (copy_from_user(stack, user_stack,
1549 sizeof(struct kvm_ia64_vcpu_stack)))
1550 goto out;
1551
1552 r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
1553 break;
1554 }
1555
1556 default:
1557 r = -EINVAL;
1558 }
1559
1560out:
1561 kfree(stack);
1562 return r;
1563}
1564
1565int kvm_arch_prepare_memory_region(struct kvm *kvm,
1566 struct kvm_memory_slot *memslot,
1567 struct kvm_memory_slot old,
1568 struct kvm_userspace_memory_region *mem,
1569 int user_alloc)
1570{
1571 unsigned long i;
1572 unsigned long pfn;
1573 int npages = memslot->npages;
1574 unsigned long base_gfn = memslot->base_gfn;
1575
1576 if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
1577 return -ENOMEM;
1578
1579 for (i = 0; i < npages; i++) {
1580 pfn = gfn_to_pfn(kvm, base_gfn + i);
1581 if (!kvm_is_mmio_pfn(pfn)) {
1582 kvm_set_pmt_entry(kvm, base_gfn + i,
1583 pfn << PAGE_SHIFT,
1584 _PAGE_AR_RWX | _PAGE_MA_WB);
1585 memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
1586 } else {
1587 kvm_set_pmt_entry(kvm, base_gfn + i,
1588 GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
1589 _PAGE_MA_UC);
1590 memslot->rmap[i] = 0;
1591 }
1592 }
1593
1594 return 0;
1595}
1596
1597void kvm_arch_commit_memory_region(struct kvm *kvm,
1598 struct kvm_userspace_memory_region *mem,
1599 struct kvm_memory_slot old,
1600 int user_alloc)
1601{
1602 return;
1603}
1604
1605void kvm_arch_flush_shadow(struct kvm *kvm)
1606{
1607 kvm_flush_remote_tlbs(kvm);
1608}
1609
1610long kvm_arch_dev_ioctl(struct file *filp,
1611 unsigned int ioctl, unsigned long arg)
1612{
1613 return -EINVAL;
1614}
1615
1616void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1617{
1618 kvm_vcpu_uninit(vcpu);
1619}
1620
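/*
 * Probe for VT-i: PAL must report the VM feature bit and be able to
 * describe the VP environment buffer.
 */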
static int vti_cpu_has_kvm_support(void)
{
	long avail = 1, status = 1, control = 1;
	long ret;

	ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
	if (ret)
		goto out;

	if (!(avail & PAL_PROC_VM_BIT))
		goto out;

	printk(KERN_DEBUG"kvm: Hardware Supports VT\n");

	ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
	if (ret)
		goto out;
	printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);

	if (!(vp_env_info & VP_OPCODE)) {
		printk(KERN_WARNING"kvm: No opcode ability on hardware, "
				"vm_env_info:0x%lx\n", vp_env_info);
	}

	return 1;
out:
	return 0;
}

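/*
 * On SGI SN2, patch the mov-from-ar.itc emulation in the copied VMM to
 * read the SN2 RTC instead of the ITC.
 */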
static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info,
			struct module *module)
{
	unsigned long new_ar, new_ar_sn2;
	unsigned long module_base;

	if (!ia64_platform_is("sn2"))
		return;

	module_base = (unsigned long)module->module_core;

	new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base;
	new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base;

	printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC "
			"as source\n");

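	/* Copy the SN2 variant over the default; 0x60 bytes is 6 bundles. */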
	memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60);
}

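/*
 * Copy the VMM module into the pinned VMM area and rebase its IVT and
 * the entry/trampoline function descriptors to KVM_VMM_BASE.
 */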
static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
			struct module *module)
{
	unsigned long module_base;
	unsigned long vmm_size;
	unsigned long vmm_offset, func_offset, fdesc_offset;
	struct fdesc *p_fdesc;

	BUG_ON(!module);

	if (!kvm_vmm_base) {
		printk("kvm: kvm area hasn't been initialized yet!!\n");
		return -EFAULT;
	}

	module_base = (unsigned long)module->module_core;
	vmm_size = module->core_size;
	if (unlikely(vmm_size > KVM_VMM_SIZE))
		return -EFAULT;

	memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
	kvm_patch_vmm(vmm_info, module);
	kvm_flush_icache(kvm_vmm_base, vmm_size);

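	/* Recompute kvm_vmm_info against the relocated copy. */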
	vmm_offset = vmm_info->vmm_ivt - module_base;
	kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
	printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
			kvm_vmm_info->vmm_ivt);

	fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
	kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
			fdesc_offset);
	func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
	p_fdesc->ip = KVM_VMM_BASE + func_offset;
	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

	printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
			KVM_VMM_BASE + func_offset);

	fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
	kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
			fdesc_offset);
	func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
	p_fdesc->ip = KVM_VMM_BASE + func_offset;
	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

	kvm_vmm_gp = p_fdesc->gp;

	printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
			kvm_vmm_info->vmm_entry);
	printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
			KVM_VMM_BASE + func_offset);

	return 0;
}

int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;

	if (!vti_cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	if (kvm_vmm_info) {
		printk(KERN_ERR "kvm: Already loaded VMM module!\n");
		r = -EEXIST;
		goto out;
	}

	r = -ENOMEM;
	kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
	if (!kvm_vmm_info)
		goto out;

	if (kvm_alloc_vmm_area())
		goto out_free0;

	r = kvm_relocate_vmm(vmm_info, vmm_info->module);
	if (r)
		goto out_free1;

	return 0;

out_free1:
	kvm_free_vmm_area();
out_free0:
	kfree(kvm_vmm_info);
out:
	return r;
}

void kvm_arch_exit(void)
{
	kvm_free_vmm_area();
	kfree(kvm_vmm_info);
	kvm_vmm_info = NULL;
}

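/*
 * The VMM logs dirty pages into a bitmap inside the shared VM data
 * area; fold those bits into the memslot bitmap and clear the shared
 * copy under the dirty_log_lock.
 */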
static void kvm_ia64_sync_dirty_log(struct kvm *kvm,
		struct kvm_memory_slot *memslot)
{
	int i;
	long base;
	unsigned long n;
	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));

	n = kvm_dirty_bitmap_bytes(memslot);
	base = memslot->base_gfn / BITS_PER_LONG;

	spin_lock(&kvm->arch.dirty_log_lock);
	for (i = 0; i < n/sizeof(long); ++i) {
		memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
		dirty_bitmap[base + i] = 0;
	}
	spin_unlock(&kvm->arch.dirty_log_lock);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
		struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_ia64_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	if (is_dirty) {
		kvm_flush_remote_tlbs(kvm);
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);

	me = get_cpu();
	if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
		if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
			smp_send_reschedule(cpu);
	put_cpu();
}

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
{
	return __apic_accept_irq(vcpu, irq->vector);
}

int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
{
	return apic->vcpu->vcpu_id == dest;
}

int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
{
	return 0;
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.xtp - vcpu2->arch.xtp;
}

int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
		int short_hand, int dest, int dest_mode)
{
	struct kvm_lapic *target = vcpu->arch.apic;
	return (dest_mode == 0) ?
		kvm_apic_match_physical_addr(target, dest) :
		kvm_apic_match_logical_addr(target, dest);
}

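/* Scan the 256-bit irr from the top word down for the highest set bit. */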
static int find_highest_bits(int *dat)
{
	u32 bits, bitnum;
	int i;

	for (i = 7; i >= 0 ; i--) {
		bits = dat[i];
		if (bits) {
			bitnum = fls(bits);
			return i * 32 + bitnum - 1;
		}
	}

	return -1;
}

int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (vpd->irr[0] & (1UL << NMI_VECTOR))
		return NMI_VECTOR;
	if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
		return ExtINT_VECTOR;

	return find_highest_bits((int *)&vpd->irr[0]);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.timer_fired;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) ||
		(kvm_highest_pending_irq(vcpu) != -1);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
		struct kvm_mp_state *mp_state)
{
	mp_state->mp_state = vcpu->arch.mp_state;
	return 0;
}

static int vcpu_reset(struct kvm_vcpu *vcpu)
{
	int r;
	long psr;

	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	local_irq_restore(psr);
	if (r)
		goto fail;

	vcpu->arch.launched = 0;
	kvm_arch_vcpu_uninit(vcpu);
	r = kvm_arch_vcpu_init(vcpu);
	if (r)
		goto fail;

	kvm_purge_vmm_mapping(vcpu);
	r = 0;
fail:
	return r;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
		struct kvm_mp_state *mp_state)
{
	int r = 0;

	vcpu->arch.mp_state = mp_state->mp_state;
	if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
		r = vcpu_reset(vcpu);
	return r;
}