/*
 * KVM/MIPS: Instruction/Exception emulation
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

/*
 * Temporarily hide CONFIG_MIPS_MT so that <asm/r4kcache.h> provides the
 * plain uniprocessor cache blast helpers.
 */
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"
#include "commpage.h"

#include "trace.h"

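/*
 * kvm_compute_return_epc() - Compute the return EPC for a branch/jump.
 *
 * Emulates the branch or jump instruction at @instpc and returns the PC the
 * guest should resume at, or KVM_INVALID_INST on failure.
 */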
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
				     unsigned long instpc)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc = KVM_INVALID_INST;

	if (epc & 3)
		goto unaligned;

	/* Read the instruction */
	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;

	switch (insn.i_format.opcode) {
		/* jr and jalr are in r_format format. */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		case bposge32_op:
			if (!cpu_has_dsp)
				goto sigill;

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		}
		break;

		/* These are unconditional and in j_format. */
	case jal_op:
		arch->gprs[31] = instpc + 8;
		/* fall through */
	case j_op:
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/* These are conditional and in i_format. */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:
	case blezl_op:
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:
	case bgtzl_op:
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

		/* And now the FPA/cp1 branch instructions. */
	case cop1_op:
		kvm_err("%s: unsupported cop1_op\n", __func__);
		break;
	}

	return nextpc;

unaligned:
	kvm_err("%s: unaligned epc\n", __func__);
	return nextpc;

sigill:
	kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
	return nextpc;
}

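/*
 * update_pc() - Advance the guest PC, emulating the branch if the faulting
 * instruction was in a branch delay slot (CAUSEF_BD set in @cause).
 */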
enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
{
	unsigned long branch_pc;
	enum emulation_result er = EMULATE_DONE;

	if (cause & CAUSEF_BD) {
		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
		if (branch_pc == KVM_INVALID_INST) {
			er = EMULATE_FAIL;
		} else {
			vcpu->arch.pc = branch_pc;
			kvm_debug("BD update_pc(): New PC: %#lx\n",
				  vcpu->arch.pc);
		}
	} else
		vcpu->arch.pc += 4;

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return er;
}

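/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case the CP0_Count timer is running).
 */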
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

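/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to scale.
 *
 * Caches the dynamic nanosecond bias in count_dyn_bias to keep the
 * intermediate delta * count_hz multiplication within 64 bits.
 *
 * Returns:	The ktime_t @now scaled to the guest CP0_Count rate.
 */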
static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
	s64 now_ns, periods;
	u64 delta;

	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;
	}

	/*
	 * We've ensured that:
	 *   delta < count_period
	 *
	 * Therefore the intermediate delta * count_hz will never overflow,
	 * since at the boundary condition:
	 *   delta = count_period = NSEC_PER_SEC * 2^32 / count_hz
	 * so:
	 *   delta * count_hz = NSEC_PER_SEC * 2^32
	 */
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}

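/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Get the effective monotonic ktime. This is usually a straightforward
 * ktime_get(), except when the timer is stopped by count_ctl.DC, in which
 * case it is count_resume, i.e. the time the count was disabled.
 *
 * Returns:	Effective monotonic ktime for CP0_Count.
 */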
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		return vcpu->arch.count_resume;

	return ktime_get();
}

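/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now assuming it is a
 * running timer, and handles a timer interrupt if the timer has expired.
 */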
static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
	ktime_t expires;
	int running;

	/* Is the hrtimer pending? */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	if (ktime_compare(now, expires) >= 0) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
		}
	}

	/* Return the biased and scaled guest CP0_Count */
	return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
}

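/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Returns the current guest CP0_Count register, handling a timer interrupt if
 * the timer has expired. If the timer is stopped, the stored soft copy is
 * returned instead.
 */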
uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* If count disabled just read static copy of count */
	if (kvm_mips_count_disabled(vcpu))
		return kvm_read_c0_guest_count(cop0);

	return kvm_mips_read_count_running(vcpu, ktime_get());
}

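/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at the point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Returns:	The ktime at the point of freeze.
 */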
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
				       uint32_t *count)
{
	ktime_t now;

	/* Stop the hrtimer before reading the time and count */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	now = ktime_get();

	/* Calculate the CP0_Count at the point of freeze */
	*count = kvm_mips_read_count_running(vcpu, now);

	return now;
}

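/**
 * kvm_mips_resume_hrtimer() - Resume the hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at the point of resume.
 * @count:	CP0_Count at the point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters have been modified.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (the guest CP0_Count timer is
 * running).
 */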
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, uint32_t count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t compare;
	u64 delta;
	ktime_t expire;

	/* Calculate timeout (wrap 0 to 2^32) */
	compare = kvm_read_c0_guest_compare(cop0);
	delta = (u64)(uint32_t)(compare - count - 1) + 1;
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}

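/**
 * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
 * @vcpu:	Virtual CPU.
 *
 * Recalculates and updates the expiry time of the hrtimer. This can be used
 * after timer parameters have been altered which do not depend on the time
 * that the change occurs. It is guaranteed that no timer interrupts will be
 * lost in the process.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu).
 */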
static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
{
	ktime_t now;
	uint32_t count;

	/*
	 * kvm_mips_freeze_hrtimer() takes care of timer interrupts <= count,
	 * and kvm_mips_resume_hrtimer() takes care of timer interrupts
	 * > count.
	 */
	now = kvm_mips_freeze_hrtimer(vcpu, &count);
	kvm_mips_resume_hrtimer(vcpu, now, count);
}

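/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */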
void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t now;

	/* Calculate bias */
	now = kvm_mips_count_time(vcpu);
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu))
		/* The timer's disabled, adjust the static count */
		kvm_write_c0_guest_count(cop0, count);
	else
		/* Update timeout */
		kvm_mips_resume_hrtimer(vcpu, now, count);
}

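/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and
 * set it going if it's enabled.
 */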
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
{
	/* 100 MHz */
	vcpu->arch.count_hz = 100*1000*1000;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
					  vcpu->arch.count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Starting at 0 */
	kvm_mips_write_count(vcpu, 0);
}

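/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so
 * that CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:	-EINVAL if @count_hz is out of range.
 *		0 on success.
 */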
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	ktime_t now;
	u32 count;

	/* ensure the frequency is in a sensible range... */
	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
		return -EINVAL;
	/* ... and has not changed */
	if (vcpu->arch.count_hz == count_hz)
		return 0;

	/* Safely freeze timer so we can keep it continuous */
	dc = kvm_mips_count_disabled(vcpu);
	if (dc) {
		now = kvm_mips_count_time(vcpu);
		count = kvm_read_c0_guest_count(cop0);
	} else {
		now = kvm_mips_freeze_hrtimer(vcpu, &count);
	}

	/* Update the frequency */
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Calculate adjusted bias so dynamic count is unchanged */
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	/* Update and resume hrtimer */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
	return 0;
}

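/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 *
 * Update CP0_Compare to a new value and update the timeout.
 */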
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* If unchanged, there is nothing to do */
	if (kvm_read_c0_guest_compare(cop0) == compare)
		return;

	/* Update compare */
	kvm_write_c0_guest_compare(cop0, compare);

	/* Update timeout if count enabled */
	if (!kvm_mips_count_disabled(vcpu))
		kvm_mips_update_hrtimer(vcpu);
}

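/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but is now being disabled by
 * CP0_Cause.DC or count_ctl.DC.
 *
 * Returns:	The time that the timer was stopped.
 */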
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t count;
	ktime_t now;

	/* Stop hrtimer */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* Set the static count from the dynamic count, handling pending TI */
	now = ktime_get();
	count = kvm_mips_read_count_running(vcpu, now);
	kvm_write_c0_guest_count(cop0, count);

	return now;
}

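/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */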
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		kvm_mips_count_disable(vcpu);
}

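/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled, potentially before even returning, so the
 * caller should be careful with ordering of CP0_Cause modifications.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */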
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t count;

	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

	/*
	 * Set the dynamic count to match the static count.
	 * This starts the hrtimer if count is enabled.
	 */
	count = kvm_read_c0_guest_count(cop0);
	kvm_mips_write_count(vcpu, count);
}

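/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */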
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
	s64 delta;
	ktime_t expire, now;
	uint32_t count, compare;

	/* Only allow defined bits to be changed */
	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
		return -EINVAL;

	/* Apply new value */
	vcpu->arch.count_ctl = count_ctl;

	/* Master CP0_Count disable */
	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
		/* Is CP0_Cause.DC already disabling CP0_Count? */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* Disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(uint32_t)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Expired, so there was a pending interrupt */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
		}
	}

	return 0;
}

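/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 *		0 on success.
 */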
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
	/*
	 * It doesn't make sense for the resume time to be in the future, as it
	 * would be possible for the next interrupt to be more than a full
	 * period in the future.
	 */
	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
		return -EINVAL;

	vcpu->arch.count_resume = ns_to_ktime(count_resume);
	return 0;
}

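/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */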
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
	/* Add the Count period to the current expiry time */
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
	return HRTIMER_RESTART;
}

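/* Emulate a guest ERET, returning from exception (EXL) or error (ERL) level */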
enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else {
		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
			vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}

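/* Emulate a guest WAIT by blocking the VCPU until an event is pending */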
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, WAIT_EXITS);
	if (!vcpu->arch.pending_exceptions) {
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable again, then definitely go off to user
		 * space to check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return EMULATE_DONE;
}

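/*
 * Linux guests do not appear to use TLBR, so return EMULATE_FAIL for now so
 * that we can catch it if that ever changes.
 */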
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t pc = vcpu->arch.pc;

	kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
	return EMULATE_FAIL;
}

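/* Write Guest TLB Entry @ Index */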
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		kvm_debug("%s: illegal index: %d\n", __func__, index);
		kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
			  pc, index, kvm_read_c0_guest_entryhi(cop0),
			  kvm_read_c0_guest_entrylo0(cop0),
			  kvm_read_c0_guest_entrylo1(cop0),
			  kvm_read_c0_guest_pagemask(cop0));
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it.
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0),
		  kvm_read_c0_guest_pagemask(cop0));

	return EMULATE_DONE;
}

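/* Write Guest TLB Entry @ Random Index */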
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;
	int index;

	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);

	tlb = &vcpu->arch.guest_tlb[index];

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it.
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0));

	return EMULATE_DONE;
}

enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	uint32_t pc = vcpu->arch.pc;
	int index = -1;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
		  index);

	return EMULATE_DONE;
}

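/**
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1.
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
 */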
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

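/**
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3.
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
 */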
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config4 is optional */
	unsigned int mask = MIPS_CONF_M;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

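/**
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4.
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
 */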
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config5 is optional */
	return MIPS_CONF_M;
}

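/**
 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5.
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config5 CP0
 * register, by the guest itself.
 */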
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to the FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE;
		/* We don't support UFR or UFE */
	}

	return mask;
}

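/* Emulate a guest CP0 instruction: MFC0/MTC0, TLB ops, ERET, WAIT etc. */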
enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
					   uint32_t cause, struct kvm_run *run,
					   struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	int32_t rt, rd, copz, sel, co_bit, op;
	uint32_t pc = vcpu->arch.pc;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	copz = (inst >> 21) & 0x1f;
	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;
	co_bit = (inst >> 25) & 1;

	if (co_bit) {
		op = (inst) & 0xff;

		switch (op) {
		case tlbr_op:	/* Read indexed TLB entry */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/* Write indexed */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/* Write random */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;
		case rfe_op:
			kvm_err("!!!COP0_RFE!!!\n");
			break;
		case eret_op:
			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		}
	} else {
		switch (copz) {
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			/* Get reg */
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
				vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			} else {
				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			}

			kvm_debug
			    ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
			     pc, rd, sel, rt, vcpu->arch.gprs[rt]);

			break;

		case dmfc_op:
			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
			break;

		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				kvm_err("Invalid TLB Index: %ld",
					vcpu->arch.gprs[rt]);
				er = EMULATE_FAIL;
				break;
			}
#define C0_EBASE_CORE_MASK 0xff
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/* Preserve CORE number */
				kvm_change_c0_guest_ebase(cop0,
							  ~(C0_EBASE_CORE_MASK),
							  vcpu->arch.gprs[rt]);
				kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
					kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				uint32_t nasid =
					vcpu->arch.gprs[rt] & ASID_MASK;
				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
				    ((kvm_read_c0_guest_entryhi(cop0) &
				      ASID_MASK) != nasid)) {
					kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
						  kvm_read_c0_guest_entryhi(cop0)
						  & ASID_MASK,
						  vcpu->arch.gprs[rt]
						  & ASID_MASK);

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
				goto done;
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
					  pc, kvm_read_c0_guest_compare(cop0),
					  vcpu->arch.gprs[rt]);

				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_callbacks->dequeue_timer_int(vcpu);
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt]);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				unsigned int old_val, val, change;

				old_val = kvm_read_c0_guest_status(cop0);
				val = vcpu->arch.gprs[rt];
				change = val ^ old_val;

				/* Make sure that the NMI bit is never set */
				val &= ~ST0_NMI;

				/*
				 * Don't allow CU1 or FR to be set unless FPU
				 * capability enabled and exists in guest
				 * configuration.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					val &= ~(ST0_CU1 | ST0_FR);

				/*
				 * Also don't allow FR to be set if host doesn't
				 * support it.
				 */
				if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
					val &= ~ST0_FR;

				/* Handle changes in FPU mode */
				preempt_disable();

				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				if (change & ST0_FR)
					kvm_drop_fpu(vcpu);

				/*
				 * If MSA state is already live, it is undefined
				 * how it interacts with FR=0 FPU state, and we
				 * don't want to hit reserved instruction
				 * exceptions trying to save the MSA state later
				 * when CU=1 && FR=1, so play it safe and save
				 * it first.
				 */
				if (change & ST0_CU1 && !(val & ST0_FR) &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
					kvm_lose_fpu(vcpu);

				/*
				 * Propagate CU1 (FPU enable) changes
				 * immediately if the FPU context is already
				 * loaded. When disabling we leave the context
				 * loaded so it can be quickly enabled again in
				 * the near future.
				 */
				if (change & ST0_CU1 &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
					change_c0_status(ST0_CU1, val);

				preempt_enable();

				kvm_write_c0_guest_status(cop0, val);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				/*
				 * If FPU present, we need CU1/FR bits to take
				 * effect fairly soon.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
				unsigned int old_val, val, change, wrmask;

				old_val = kvm_read_c0_guest_config5(cop0);
				val = vcpu->arch.gprs[rt];

				/* Only a few bits are writable in Config5 */
				wrmask = kvm_mips_config5_wrmask(vcpu);
				change = (val ^ old_val) & wrmask;
				val = old_val ^ change;

				/* Handle changes in FPU/MSA modes */
				preempt_disable();

				/*
				 * Propagate FRE changes immediately if the FPU
				 * context is already loaded.
				 */
				if (change & MIPS_CONF5_FRE &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
					change_c0_config5(MIPS_CONF5_FRE, val);

				/*
				 * Propagate MSAEn changes immediately if the
				 * MSA context is already loaded. When disabling
				 * we leave the context loaded so it can be
				 * quickly enabled again in the near future.
				 */
				if (change & MIPS_CONF5_MSAEN &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
					change_c0_config5(MIPS_CONF5_MSAEN,
							  val);

				preempt_enable();

				kvm_write_c0_guest_config5(cop0, val);
			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
				uint32_t old_cause, new_cause;

				old_cause = kvm_read_c0_guest_cause(cop0);
				new_cause = vcpu->arch.gprs[rt];
				/* Update R/W bits */
				kvm_change_c0_guest_cause(cop0, 0x08800300,
							  new_cause);
				/* DC bit enabling/disabling timer? */
				if ((old_cause ^ new_cause) & CAUSEF_DC) {
					if (new_cause & CAUSEF_DC)
						kvm_mips_count_disable_cause(vcpu);
					else
						kvm_mips_count_enable_cause(vcpu);
				}
			} else {
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			}

			kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
				  rd, sel, cop0->reg[rd][sel]);
			break;

		case dmtc_op:
			kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
				vcpu->arch.pc, rt, rd, sel);
			er = EMULATE_FAIL;
			break;

		case mfmc0_op:
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
			if (rt != 0)
				vcpu->arch.gprs[rt] =
					kvm_read_c0_guest_status(cop0);
			/* EI */
			if (inst & 0x20) {
				kvm_debug("[%#lx] mfmc0_op: EI\n",
					  vcpu->arch.pc);
				kvm_set_c0_guest_status(cop0, ST0_IE);
			} else {
				kvm_debug("[%#lx] mfmc0_op: DI\n",
					  vcpu->arch.pc);
				kvm_clear_c0_guest_status(cop0, ST0_IE);
			}

			break;

		case wrpgpr_op:
			{
				uint32_t css =
				    cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
				uint32_t pss =
				    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
				/*
				 * We don't support any shadow register sets, so
				 * SRSCtl[PSS] == SRSCtl[CSS] = 0
				 */
				if (css || pss) {
					er = EMULATE_FAIL;
					break;
				}
				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
					  vcpu->arch.gprs[rt]);
				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
			}
			break;
		default:
			kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
				vcpu->arch.pc, copz);
			er = EMULATE_FAIL;
			break;
		}
	}

done:
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

dont_update_pc:
	/*
	 * This is for special instructions whose emulation updates the PC, so
	 * do not overwrite the PC under any circumstances
	 */
	return er;
}

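/* Emulate a guest store to an MMIO region and prepare the kvm_run MMIO exit */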
enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	switch (op) {
	case sb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
			  *(uint8_t *) data);

		break;

	case sw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint32_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint32_t *) data);
		break;

	case sh_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint16_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint32_t *) data);
		break;

	default:
		kvm_err("Store not yet supported");
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}

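/*
 * Emulate a guest load from an MMIO region; the result is delivered later by
 * kvm_mips_complete_mmio_load() once userspace has provided the data.
 */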
enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	vcpu->arch.pending_load_cause = cause;
	vcpu->arch.io_gpr = rt;

	switch (op) {
	case lw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		break;

	case lh_op:
	case lhu_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	case lbu_op:
	case lb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	default:
		kvm_err("Load not yet supported");
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

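/*
 * Sync the guest icache for a guest virtual address, flushing via the
 * unmapped (CKSEG0) alias of the backing physical page.
 */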
int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
{
	unsigned long offset = (va & ~PAGE_MASK);
	struct kvm *kvm = vcpu->kvm;
	unsigned long pa;
	gfn_t gfn;
	kvm_pfn_t pfn;

	gfn = va >> PAGE_SHIFT;

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		return -1;
	}
	pfn = kvm->arch.guest_pmap[gfn];
	pa = (pfn << PAGE_SHIFT) | offset;

	kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
		  CKSEG0ADDR(pa));

	/* Flush one cache line's worth at the unmapped alias */
	local_flush_icache_range(CKSEG0ADDR(pa), CKSEG0ADDR(pa) + 32);
	return 0;
}

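/*
 * Emulate a guest CACHE instruction, faulting in the address mapping as
 * necessary before performing the cache operation.
 */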
enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
					     uint32_t cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	int32_t offset, cache, op_inst, op, base;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = (inst >> 21) & 0x1f;
	op_inst = (inst >> 16) & 0x1f;
	offset = (int16_t)inst;
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/*
	 * Index cache ops are issued by Linux on startup to invalidate the
	 * caches entirely by stepping through all the ways/indexes, so just
	 * blast the whole cache.
	 */
	if (op == Index_Writeback_Inv) {
		kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
			  arch->gprs[base], offset);

		if (cache == Cache_D)
			r4k_blast_dcache();
		else if (cache == Cache_I)
			r4k_blast_icache();
		else {
			kvm_err("%s: unsupported CACHE INDEX operation\n",
				__func__);
			return EMULATE_FAIL;
		}

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
		goto done;
	}

	preempt_disable();
	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		int index;

		/* If an entry already exists then skip */
		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
			goto skip_fault;

		/*
		 * If address not in the guest TLB, then give the guest a
		 * fault; the resulting handler will do the right thing.
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  (kvm_read_c0_guest_entryhi
						   (cop0) & ASID_MASK));

		if (index < 0) {
			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
			vcpu->arch.host_cp0_badvaddr = va;
			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
							 vcpu);
			preempt_enable();
			goto dont_update_pc;
		} else {
			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
			/*
			 * Check if the entry is valid, if not then setup a TLB
			 * invalid exception to the guest
			 */
			if (!TLB_IS_VALID(*tlb, va)) {
				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
								run, vcpu);
				preempt_enable();
				goto dont_update_pc;
			} else {
				/*
				 * We fault an entry from the guest TLB to the
				 * shadow host TLB
				 */
				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
								     NULL,
								     NULL);
			}
		}
	} else {
		kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;
	}

skip_fault:
	/* Only a subset of cache ops is supported, namely those used by Linux */
	if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
		flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/*
		 * Replace the CACHE instruction with a SYNCI, not the same,
		 * but avoids a trap
		 */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else if (op_inst == Hit_Invalidate_I) {
		flush_dcache_line(va);
		flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction with a SYNCI */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else {
		kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;
	}

	preempt_enable();

dont_update_pc:
	/* Rollback PC */
	vcpu->arch.pc = curr_pc;
done:
	return er;
}

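/* Decode and dispatch an instruction that trapped into the hypervisor */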
enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t inst;

	/* Fetch the instruction */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst = kvm_get_inst(opc, vcpu);

	switch (((union mips_instruction)inst).r_format.opcode) {
	case cop0_op:
		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
		break;
	case sb_op:
	case sh_op:
	case sw_op:
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		break;
	case lb_op:
	case lbu_op:
	case lhu_op:
	case lh_op:
	case lw_op:
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		break;

	case cache_op:
		++vcpu->stat.cache_exits;
		trace_kvm_exit(vcpu, CACHE_EXITS);
		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
		break;

	default:
		kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
			inst);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

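/* Deliver a SYSCALL exception to the guest */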
enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
					       uint32_t *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_SYS << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

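/* Deliver a TLB refill (load/fetch) exception to the guest */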
enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
						  uint32_t *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;

	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBL << CAUSEB_EXCCODE));

	/* setup badvaddr and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}

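/* Deliver a TLB invalid (load/fetch) exception to the guest */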
enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
						 uint32_t *opc,
						 struct kvm_run *run,
						 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi =
		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBL << CAUSEB_EXCCODE));

	/* setup badvaddr and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}

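/* Deliver a TLB refill (store) exception to the guest */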
enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
						  uint32_t *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBS << CAUSEB_EXCCODE));

	/* setup badvaddr and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}

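/* Deliver a TLB invalid (store) exception to the guest */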
enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
						 uint32_t *opc,
						 struct kvm_run *run,
						 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBS << CAUSEB_EXCCODE));

	/* setup badvaddr and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}

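/* TLBMOD: store to address matching TLB entry with Dirty bit off */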
enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
#ifdef DEBUG
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	int index;

	/* If address not in the guest TLB, then we are in trouble */
	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
	if (index < 0) {
		/* Invalidate this entry in the shadow host TLB */
		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
			__func__, entryhi);
		kvm_mips_dump_guest_tlbs(vcpu);
		kvm_mips_dump_host_tlbs();
		return EMULATE_FAIL;
	}
#endif

	er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
	return er;
}

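/* Deliver a TLB modified exception to the guest */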
enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
					      uint32_t *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	struct kvm_vcpu_arch *arch = &vcpu->arch;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_MOD << CAUSEB_EXCCODE));

	/* setup badvaddr and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}

enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
					       uint32_t *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
	}

	arch->pc = KVM_GUEST_KSEG0 + 0x180;

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_CPU << CAUSEB_EXCCODE));
	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));

	return EMULATE_DONE;
}

enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
					      uint32_t *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_RI << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver RI when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
					      uint32_t *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_BP << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver BP when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
						uint32_t *opc,
						struct kvm_run *run,
						struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_TR << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver TRAP when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
						  uint32_t *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_MSAFPE << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
					       uint32_t *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_FPE << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver FPE when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
						  uint32_t *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_MSADIS << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver MSADIS when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

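/* ll/sc, rdhwr, sync emulation */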
#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

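/*
 * Handle a reserved instruction (RI) exception: emulate RDHWR reads on behalf
 * of the guest, otherwise pass the RI exception on to the guest OS.
 */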
enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
					 struct kvm_run *run,
					 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;
	uint32_t inst;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	/* Fetch the instruction */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst = kvm_get_inst(opc, vcpu);

	if (inst == KVM_INVALID_INST) {
		kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
		return EMULATE_FAIL;
	}

	if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
		int rd = (inst & RD) >> 11;
		int rt = (inst & RT) >> 16;

		/* In usermode, the register must be enabled in guest HWREna */
		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
				  rd, opc);
			goto emulate_ri;
		}
		switch (rd) {
		case 0:	/* CPU number */
			arch->gprs[rt] = 0;
			break;
		case 1:	/* SYNCI length */
			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
					     current_cpu_data.icache.linesz);
			break;
		case 2:	/* Read count register */
			arch->gprs[rt] = kvm_mips_read_count(vcpu);
			break;
		case 3:	/* Count register resolution */
			switch (current_cpu_data.cputype) {
			case CPU_20KC:
			case CPU_25KF:
				arch->gprs[rt] = 1;
				break;
			default:
				arch->gprs[rt] = 2;
			}
			break;
		case 29:	/* UserLocal */
			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
			break;

		default:
			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
			goto emulate_ri;
		}
	} else {
		kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
		goto emulate_ri;
	}

	return EMULATE_DONE;

emulate_ri:
	/*
	 * Rollback PC (if in branch delay slot then the PC already points to
	 * branch target), and pass the RI exception to the guest OS.
	 */
	vcpu->arch.pc = curr_pc;
	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
}

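/* Complete a pending MMIO load once userspace has provided the data */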
enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
						  struct kvm_run *run)
{
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;

	if (run->mmio.len > sizeof(*gpr)) {
		kvm_err("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	/* Update the PC we deferred when the load was emulated */
	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
	if (er == EMULATE_FAIL)
		return er;

	/* mmio_needed == 2 marks a sign-extending load (lb/lh) */
	switch (run->mmio.len) {
	case 4:
		*gpr = *(int32_t *) run->mmio.data;
		break;

	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int16_t *) run->mmio.data;
		else
			*gpr = *(uint16_t *)run->mmio.data;

		break;
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int8_t *) run->mmio.data;
		else
			*gpr = *(u8 *) run->mmio.data;
		break;
	}

	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
			  vcpu->mmio_needed);

done:
	return er;
}

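/* Deliver a generic exception with the given cause code to the guest */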
static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
						  uint32_t *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (exccode << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
			  exccode, kvm_read_c0_guest_epc(cop0),
			  kvm_read_c0_guest_badvaddr(cop0));
	} else {
		kvm_err("Trying to deliver EXC when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

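/*
 * Check that the guest was privileged enough for the attempted access,
 * converting the exception to an address error where appropriate and
 * delivering it to the guest on privilege failure.
 */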
enum emulation_result kvm_mips_check_privilege(unsigned long cause,
					       uint32_t *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;

	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);

	if (usermode) {
		switch (exccode) {
		case EXCCODE_INT:
		case EXCCODE_SYS:
		case EXCCODE_BP:
		case EXCCODE_RI:
		case EXCCODE_TR:
		case EXCCODE_MSAFPE:
		case EXCCODE_FPE:
		case EXCCODE_MSADIS:
			break;

		case EXCCODE_CPU:
			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
				er = EMULATE_PRIV_FAIL;
			break;

		case EXCCODE_MOD:
			break;

		case EXCCODE_TLBL:
			/*
			 * We are accessing Guest kernel space, so send an
			 * address error exception to the guest
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: LD MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case EXCCODE_TLBS:
			/*
			 * We are accessing Guest kernel space, so send an
			 * address error exception to the guest
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: ST MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case EXCCODE_ADES:
			kvm_debug("%s: address error ST @ %#lx\n", __func__,
				  badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		case EXCCODE_ADEL:
			kvm_debug("%s: address error LD @ %#lx\n", __func__,
				  badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		default:
			er = EMULATE_PRIV_FAIL;
			break;
		}
	}

	if (er == EMULATE_PRIV_FAIL)
		kvm_mips_emulate_exc(cause, opc, run, vcpu);

	return er;
}

/*
 * User Address (UA) fault, this could happen if
 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
 *     case we pass on the fault to the guest kernel and let it handle it.
 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
 *     case we inject the TLB from the Guest TLB into the shadow host TLB
 */
enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
					      uint32_t *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long va = vcpu->arch.host_cp0_badvaddr;
	int index;

	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
		  vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);

	/*
	 * KVM would not have got the exception if this entry was valid in the
	 * shadow host TLB. Check the Guest TLB, if the entry is not there then
	 * send the guest an exception. The guest exc handler should then
	 * inject an entry into the guest TLB.
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu,
					  (va & VPN2_MASK) |
					  (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK));
	if (index < 0) {
		if (exccode == EXCCODE_TLBL) {
			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
		} else if (exccode == EXCCODE_TLBS) {
			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
		} else {
			kvm_err("%s: invalid exc code: %d\n", __func__,
				exccode);
			er = EMULATE_FAIL;
		}
	} else {
		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

		/*
		 * Check if the entry is valid, if not then setup a TLB invalid
		 * exception to the guest
		 */
		if (!TLB_IS_VALID(*tlb, va)) {
			if (exccode == EXCCODE_TLBL) {
				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
								vcpu);
			} else if (exccode == EXCCODE_TLBS) {
				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
								vcpu);
			} else {
				kvm_err("%s: invalid exc code: %d\n", __func__,
					exccode);
				er = EMULATE_FAIL;
			}
		} else {
			kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
				  tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
			/*
			 * OK we have a Guest TLB entry, now inject it into the
			 * shadow host TLB
			 */
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
							     NULL);
		}
	}

	return er;
}