/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100
#endif

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("wait", wait_exits),
	VCPU_STAT("cache", cache_exits),
	VCPU_STAT("signal", signal_exits),
	VCPU_STAT("interrupt", int_exits),
	VCPU_STAT("cop_unusable", cop_unusable_exits),
	VCPU_STAT("tlbmod", tlbmod_exits),
	VCPU_STAT("tlbmiss_ld", tlbmiss_ld_exits),
	VCPU_STAT("tlbmiss_st", tlbmiss_st_exits),
	VCPU_STAT("addrerr_st", addrerr_st_exits),
	VCPU_STAT("addrerr_ld", addrerr_ld_exits),
	VCPU_STAT("syscall", syscall_exits),
	VCPU_STAT("resvd_inst", resvd_inst_exits),
	VCPU_STAT("break_inst", break_inst_exits),
	VCPU_STAT("trap_inst", trap_inst_exits),
	VCPU_STAT("msa_fpe", msa_fpe_exits),
	VCPU_STAT("fpe", fpe_exits),
	VCPU_STAT("msa_disabled", msa_disabled_exits),
	VCPU_STAT("flush_dcache", flush_dcache_exits),
	VCPU_STAT("vz_gpsi", vz_gpsi_exits),
	VCPU_STAT("vz_gsfc", vz_gsfc_exits),
	VCPU_STAT("vz_hc", vz_hc_exits),
	VCPU_STAT("vz_grr", vz_grr_exits),
	VCPU_STAT("vz_gva", vz_gva_exits),
	VCPU_STAT("vz_ghfc", vz_ghfc_exits),
	VCPU_STAT("vz_gpa", vz_gpa_exits),
	VCPU_STAT("vz_resvd", vz_resvd_exits),
#ifdef CONFIG_CPU_LOONGSON64
	VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
#endif
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	{NULL}
};

bool kvm_trace_guest_mode_change;

int kvm_guest_mode_change_trace_reg(void)
{
	kvm_trace_guest_mode_change = true;
	return 0;
}

void kvm_guest_mode_change_trace_unreg(void)
{
	kvm_trace_guest_mode_change = false;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_hardware_enable(void)
{
	return kvm_mips_callbacks->hardware_enable();
}

void kvm_arch_hardware_disable(void)
{
	kvm_mips_callbacks->hardware_disable();
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

extern void kvm_init_loongson_ipi(struct kvm *kvm);

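/*
 * Initialise a new VM. Only the VZ implementation is supported here, so both
 * KVM_VM_MIPS_AUTO and KVM_VM_MIPS_VZ are accepted; any other VM type is
 * rejected with -EINVAL.
 */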
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	switch (type) {
	case KVM_VM_MIPS_AUTO:
		break;
	case KVM_VM_MIPS_VZ:
		break;
	default:
		/* Unsupported KVM type */
		return -EINVAL;
	}

	/* Allocate page table to map GPA -> RPA */
	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
	if (!kvm->arch.gpa_mm.pgd)
		return -ENOMEM;

#ifdef CONFIG_CPU_LOONGSON64
	kvm_init_loongson_ipi(kvm);
#endif

	return 0;
}

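/*
 * Tear down all vCPUs of a VM: destroy each vCPU first, then clear the
 * kvm->vcpus[] slots and the online vCPU count under kvm->lock so that no
 * stale pointers remain.
 */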
void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_vcpu_destroy(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}

static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
	/* It should always be safe to remove after flushing the whole range */
	WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);
	kvm_mips_free_gpa_pt(kvm);
}

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	/* Flush whole GPA */
	kvm_mips_flush_gpa_pt(kvm, 0, ~0);
	kvm_flush_remote_tlbs(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	/*
	 * The slot has been made invalid (ready for moving or deletion), so we
	 * need to ensure that it can no longer be accessed by any guest VCPUs.
	 */

	spin_lock(&kvm->mmu_lock);
	/* Flush slot from GPA */
	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
			      slot->base_gfn + slot->npages - 1);
	kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
	spin_unlock(&kvm->mmu_lock);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int needs_flush;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/*
	 * If dirty page logging is enabled, write protect all pages in the
	 * slot ready for dirty logging.
	 *
	 * There is no need to do this in any of the following cases:
	 * CREATE:	No dirty mappings will already exist.
	 * MOVE/DELETE:	The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot().
	 */
	if (change == KVM_MR_FLAGS_ONLY &&
	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
		spin_lock(&kvm->mmu_lock);
		/* Write protect GPA page table entries */
		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
					new->base_gfn + new->npages - 1);
		if (needs_flush)
			kvm_arch_flush_remote_tlbs_memslot(kvm, new);
		spin_unlock(&kvm->mmu_lock);
	}
}

static inline void dump_handler(const char *symbol, void *start, void *end)
{
	u32 *p;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}

static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	rcuwait_wake_up(&vcpu->wait);

	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

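/*
 * Create and initialise a vCPU. A page-aligned buffer (gebase) is allocated
 * and populated with generated code:
 *   gebase + 0x000/0x080: TLB refill vector (XTLB refill on 64-bit)
 *   gebase + 0x180:       general exception vector
 *   gebase + 0x200...:    vectored interrupt handlers, VECTORSPACING apart
 *   gebase + 0x2000:      general exit handler, followed by the guest entry
 *                         routine stored in vcpu->arch.vcpu_run
 */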
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err, size;
	void *gebase, *p, *handler, *refill_start, *refill_end;
	int i;

	kvm_debug("kvm @ %p: create cpu %d at %p\n",
		  vcpu->kvm, vcpu->vcpu_id, vcpu);

	err = kvm_mips_callbacks->vcpu_init(vcpu);
	if (err)
		return err;

	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_vcpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/*
	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
	 * in CP0_EBase prevents it from being relocated above 512 MiB.
	 */
	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
			gebase);
		err = -ENOMEM;
		goto out_free_gebase;
	}

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
	refill_start = gebase;
	if (IS_ENABLED(CONFIG_64BIT))
		refill_start += 0x080;
	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);

	/* General Exception Entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
					 handler);
	}

	/* General exit handler */
	p = handler;
	p = kvm_mips_build_exit(p);

	/* Guest entry routine */
	vcpu->arch.vcpu_run = p;
	p = kvm_mips_build_vcpu_run(p);

	/* Dump the generated code */
	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");
	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
	dump_handler("kvm_tlb_refill", refill_start, refill_end);
	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);

	/* Invalidate the icache for these ranges */
	flush_icache_range((unsigned long)gebase,
			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/* Init */
	vcpu->arch.last_sched_cpu = -1;
	vcpu->arch.last_exec_cpu = -1;

	/* Initial guest state */
	err = kvm_mips_callbacks->vcpu_setup(vcpu);
	if (err)
		goto out_free_gebase;

	return 0;

out_free_gebase:
	kfree(gebase);
out_uninit_vcpu:
	kvm_mips_callbacks->vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_mips_dump_stats(vcpu);

	kvm_mmu_free_memory_caches(vcpu);
	kfree(vcpu->arch.guest_ebase);

	kvm_mips_callbacks->vcpu_uninit(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}

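/*
 * Run the vCPU: complete any pending MMIO load, honour immediate_exit, then
 * enter the guest with interrupts disabled via the generated entry code.
 */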
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int r = -EINTR;

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu);
		vcpu->mmio_needed = 0;
	}

	if (vcpu->run->immediate_exit)
		goto out;

	lose_fpu(1);

	local_irq_disable();
	guest_enter_irqoff();
	trace_kvm_enter(vcpu);

	/*
	 * Make sure the read of VCPU requests is not reordered with the
	 * write of vcpu->mode, or we could miss a TLB flush request while
	 * the requester sees the VCPU as outside of guest mode and not
	 * needing a flush (see kvm_vcpu_exiting_guest_mode()).
	 */
	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

	r = kvm_mips_callbacks->vcpu_run(vcpu);

	trace_kvm_out(vcpu);
	guest_exit_irqoff();
	local_irq_enable();

out:
	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);
	return r;
}

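/*
 * Inject or clear a guest interrupt line. A positive irq->irq raises the
 * corresponding line, a negative value lowers it, and irq->cpu == -1 targets
 * the calling vCPU itself.
 */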
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
	    intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
	    intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
	    intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	rcuwait_wake_up(&dvcpu->wait);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

#ifndef CONFIG_CPU_MIPSR6
	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
#endif
	KVM_REG_MIPS_PC,
};

static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_IR,
	KVM_REG_MIPS_FCR_CSR,
};

static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_IR,
	KVM_REG_MIPS_MSA_CSR,
};

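/*
 * Illustrative only (not part of this file): userspace reads each register
 * listed above with the KVM_GET_ONE_REG ioctl on the vCPU fd, e.g.
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_PC,
 *		.addr = (__u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * kvm_mips_num_regs() and kvm_mips_copy_reg_indices() below tell userspace
 * how many such ids exist and enumerate them for KVM_GET_REG_LIST.
 */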
static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		/* odd doubles */
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
			ret += 16;
	}
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += kvm_mips_callbacks->num_regs(vcpu);

	return ret;
}

static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;

			/* skip odd doubles if no Floating Point 64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				continue;

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
}

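/*
 * Read one register into the userspace buffer. The KVM_REG_SIZE field of
 * reg->id selects how the value is copied out: 32-bit and 64-bit values via
 * put_user(), 128-bit MSA vectors via copy_to_user().
 */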
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
#endif
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA vector if FR=0 */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;

	/* Remaining registers are handled by the implementation callback */
	default:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}

static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		/* Don't return yet: the register still has to be written */
		if (copy_from_user(vs, uaddr, 16))
			return -EFAULT;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
#endif
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
		else
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		set_fpr64(&fpu->fpr[idx], 0, v);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		fpu->fcr31 = v;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		fpu->msacsr = v;
		break;

	/* Remaining registers are handled by the implementation callback */
	default:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	}
	return 0;
}

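/*
 * Enable an optional vCPU capability (FPU or MSA guest state). Illustrative
 * userspace call (not part of this file):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 *
 * Flags and arguments must be zero, and the capability must be reported by
 * KVM_CHECK_EXTENSION on this VM.
 */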
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r = 0;

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
		return -EINVAL;
	if (cap->flags)
		return -EINVAL;
	if (cap->args[0])
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
			       unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_mips_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
			  irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_mips_set_reg(vcpu, &reg);
		else
			r = kvm_mips_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned int n;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}

	vcpu_put(vcpu);
	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	/* Nothing to do */
}

int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	kvm_mips_callbacks->prepare_flush_shadow(kvm);
	/* Let the common code broadcast the TLB flush request to all vCPUs */
	return 1;
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot)
{
	kvm_flush_remote_tlbs(kvm);
}

long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    on QEMU until it supports it.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to inform kvm_mips_guest_has_msa()
		 * when partial vector state is visible.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = kvm_mips_callbacks->check_extension(kvm, ext);
		break;
	}
	return r;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu) ||
		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i],
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

static void kvm_mips_set_c0_status(void)
{
	u32 status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}

/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	u32 inst;
	int ret = RESUME_GUEST;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	switch (exccode) {
	case EXCCODE_INT:
		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case EXCCODE_CPU:
		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case EXCCODE_MOD:
		++vcpu->stat.tlbmod_exits;
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case EXCCODE_TLBL:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case EXCCODE_ADES:
		++vcpu->stat.addrerr_st_exits;
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case EXCCODE_ADEL:
		++vcpu->stat.addrerr_ld_exits;
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case EXCCODE_SYS:
		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case EXCCODE_RI:
		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case EXCCODE_BP:
		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case EXCCODE_TR:
		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case EXCCODE_FPE:
		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	case EXCCODE_GE:
		/* defer exit accounting to handler */
		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
		break;

	default:
		if (cause & CAUSEF_BD)
			opc += 1;
		inst = 0;
		kvm_get_badinstr(opc, vcpu, &inst);
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
			exccode, opc, inst, badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;
	}

	local_irq_disable();

	if (ret == RESUME_GUEST)
		kvm_vz_acquire_htimer(vcpu);

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
		}
	}

	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		/*
		 * Make sure the read of VCPU requests in the vcpu_reenter()
		 * callback is not reordered ahead of the write to vcpu->mode,
		 * or we could miss a TLB flush request while the requester
		 * sees the VCPU as outside of guest mode and not needing a
		 * flush (see kvm_vcpu_exiting_guest_mode()).
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		kvm_mips_callbacks->vcpu_reenter(vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}
	return ret;
}

/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
	} else {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
	}

	preempt_enable();
}

#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
			kvm_lose_fpu(vcpu);

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);
		}
	}

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
		break;
	case 0:
		/* Neither FPU nor MSA, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);
		break;
	default:
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
		break;
	}

	preempt_enable();
}
#endif

/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		disable_msa();
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
	}
	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
	preempt_enable();
}

/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * FPU & MSA get disabled in root context (hardware) when they are
	 * disabled in guest context (software), but the register state in
	 * the hardware may still be in use.
	 * This is why we explicitly re-enable the hardware before saving.
	 */
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
	}
	preempt_enable();
}

/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore the guest's current FCSR & MSACSR state; these may trigger
 * a pending FP exception, which we need to step over.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};

static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO_1] = C_IRQ0,
	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
};

static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO_1] = C_IRQ0,
	[MIPS_EXC_INT_IO_2] = C_IRQ1,
	[MIPS_EXC_INT_IPI_1] = C_IRQ4,
};

u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;

u32 kvm_irq_to_priority(u32 irq)
{
	int i;

	for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
		if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
			return i;
	}

	return MIPS_EXC_MAX;
}

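/*
 * Module init: MMID-capable CPUs are refused (not yet supported), the guest
 * entry/exit code templates are prepared before kvm_init(), Loongson64 hosts
 * switch to their own IRQ priority map, and a die notifier is registered so
 * FCSR/MSACSR restore exceptions can be stepped over (see
 * kvm_mips_csr_die_notify() above).
 */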
static int __init kvm_mips_init(void)
{
	int ret;

	if (cpu_has_mmid) {
		pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
		return -EOPNOTSUPP;
	}

	ret = kvm_mips_entry_setup();
	if (ret)
		return ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	if (boot_cpu_type() == CPU_LOONGSON64)
		kvm_priority_to_irq = kvm_loongson3_priority_to_irq;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	return 0;
}

static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);