/*
 * KVM/MIPS: instruction and exception emulation, including branch emulation,
 * the virtual CP0_Count/CP0_Compare timer, and MMIO load/store emulation.
 */

12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/ktime.h>
15#include <linux/kvm_host.h>
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/memblock.h>
19#include <linux/random.h>
20#include <asm/page.h>
21#include <asm/cacheflush.h>
22#include <asm/cacheops.h>
23#include <asm/cpu-info.h>
24#include <asm/mmu_context.h>
25#include <asm/tlbflush.h>
26#include <asm/inst.h>
27
28#undef CONFIG_MIPS_MT
29#include <asm/r4kcache.h>
30#define CONFIG_MIPS_MT
31
32#include "interrupt.h"
33
34#include "trace.h"
35
/*
 * Compute the return EPC for a branch or jump instruction in a branch delay
 * slot: decode the branch at @instpc and write the address execution should
 * resume at to @out.
 */
40static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
41 unsigned long *out)
42{
43 unsigned int dspcontrol;
44 union mips_instruction insn;
45 struct kvm_vcpu_arch *arch = &vcpu->arch;
46 long epc = instpc;
47 long nextpc;
48 int err;
49
50 if (epc & 3) {
51 kvm_err("%s: unaligned epc\n", __func__);
52 return -EINVAL;
53 }
54
55
56 err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
57 if (err)
58 return err;
59
60 switch (insn.i_format.opcode) {
 /* jr and jalr are in r_format format */
62 case spec_op:
63 switch (insn.r_format.func) {
64 case jalr_op:
65 arch->gprs[insn.r_format.rd] = epc + 8;
66 fallthrough;
67 case jr_op:
68 nextpc = arch->gprs[insn.r_format.rs];
69 break;
70 default:
71 return -EINVAL;
72 }
73 break;

 /*
  * bcond_op encodes the branches dispatched on the rt field:
  * bltz, bgez, bltzl, bgezl, bltzal, bgezal, bltzall, bgezall,
  * and the DSP bposge32.
  */
80 case bcond_op:
81 switch (insn.i_format.rt) {
82 case bltz_op:
83 case bltzl_op:
84 if ((long)arch->gprs[insn.i_format.rs] < 0)
85 epc = epc + 4 + (insn.i_format.simmediate << 2);
86 else
87 epc += 8;
88 nextpc = epc;
89 break;
90
91 case bgez_op:
92 case bgezl_op:
93 if ((long)arch->gprs[insn.i_format.rs] >= 0)
94 epc = epc + 4 + (insn.i_format.simmediate << 2);
95 else
96 epc += 8;
97 nextpc = epc;
98 break;
99
100 case bltzal_op:
101 case bltzall_op:
102 arch->gprs[31] = epc + 8;
103 if ((long)arch->gprs[insn.i_format.rs] < 0)
104 epc = epc + 4 + (insn.i_format.simmediate << 2);
105 else
106 epc += 8;
107 nextpc = epc;
108 break;
109
110 case bgezal_op:
111 case bgezall_op:
112 arch->gprs[31] = epc + 8;
113 if ((long)arch->gprs[insn.i_format.rs] >= 0)
114 epc = epc + 4 + (insn.i_format.simmediate << 2);
115 else
116 epc += 8;
117 nextpc = epc;
118 break;
119 case bposge32_op:
120 if (!cpu_has_dsp) {
121 kvm_err("%s: DSP branch but not DSP ASE\n",
122 __func__);
123 return -EINVAL;
124 }
125
126 dspcontrol = rddsp(0x01);
127
128 if (dspcontrol >= 32)
129 epc = epc + 4 + (insn.i_format.simmediate << 2);
130 else
131 epc += 8;
132 nextpc = epc;
133 break;
134 default:
135 return -EINVAL;
136 }
137 break;

 /* These are unconditional jumps, in j_format */
140 case jal_op:
141 arch->gprs[31] = instpc + 8;
142 fallthrough;
143 case j_op:
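 /*
  * J/JAL are PC-region jumps: the upper 4 bits of the delay slot address
  * (epc + 4) are kept and the low 28 bits are replaced by target << 2.
  */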
144 epc += 4;
145 epc >>= 28;
146 epc <<= 28;
147 epc |= (insn.j_format.target << 2);
148 nextpc = epc;
149 break;

 /* These are conditional branches, in i_format */
152 case beq_op:
153 case beql_op:
154 if (arch->gprs[insn.i_format.rs] ==
155 arch->gprs[insn.i_format.rt])
156 epc = epc + 4 + (insn.i_format.simmediate << 2);
157 else
158 epc += 8;
159 nextpc = epc;
160 break;
161
162 case bne_op:
163 case bnel_op:
164 if (arch->gprs[insn.i_format.rs] !=
165 arch->gprs[insn.i_format.rt])
166 epc = epc + 4 + (insn.i_format.simmediate << 2);
167 else
168 epc += 8;
169 nextpc = epc;
170 break;
171
172 case blez_op:
173#ifndef CONFIG_CPU_MIPSR6
174 case blezl_op:
175#endif
176 if (insn.i_format.rt != 0)
177 goto compact_branch;
178 if ((long)arch->gprs[insn.i_format.rs] <= 0)
179 epc = epc + 4 + (insn.i_format.simmediate << 2);
180 else
181 epc += 8;
182 nextpc = epc;
183 break;
184
185 case bgtz_op:
186#ifndef CONFIG_CPU_MIPSR6
187 case bgtzl_op:
188#endif
189 if (insn.i_format.rt != 0)
190 goto compact_branch;
191 if ((long)arch->gprs[insn.i_format.rs] > 0)
192 epc = epc + 4 + (insn.i_format.simmediate << 2);
193 else
194 epc += 8;
195 nextpc = epc;
196 break;

 /* Floating point coprocessor (cp1) branches are not emulated */
199 case cop1_op:
200 kvm_err("%s: unsupported cop1_op\n", __func__);
201 return -EINVAL;
202
203#ifdef CONFIG_CPU_MIPSR6
 /* R6 added the following compact branches with forbidden slots */
205 case blezl_op:
206 case bgtzl_op:
 /* only rt == 0 isn't a compact branch */
208 if (insn.i_format.rt != 0)
209 goto compact_branch;
210 return -EINVAL;
211 case pop10_op:
212 case pop30_op:
 /* only rs == rt == 0 is reserved, the rest are compact branches */
214 if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
215 goto compact_branch;
216 return -EINVAL;
217 case pop66_op:
218 case pop76_op:
 /* only rs == 0 isn't a compact branch */
220 if (insn.i_format.rs != 0)
221 goto compact_branch;
222 return -EINVAL;
223compact_branch:
 /*
  * If we've hit an exception on the forbidden slot, then the branch must
  * not have been taken, so continue after the delay (forbidden) slot.
  */
228 epc += 8;
229 nextpc = epc;
230 break;
231#else
232compact_branch:
 /* Compact branches not supported before R6; fall through to default */
234#endif
235 default:
236 return -EINVAL;
237 }
238
239 *out = nextpc;
240 return 0;
241}
242
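/*
 * update_pc() - Advance the guest PC past the instruction being emulated.
 * @vcpu:	Virtual CPU.
 * @cause:	CP0_Cause of the faulting instruction.
 *
 * If CAUSEF_BD is set the faulting instruction sits in a branch delay slot,
 * so the new PC is the branch target computed by kvm_compute_return_epc();
 * otherwise the PC simply steps over the 4-byte instruction.
 */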
243enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
244{
245 int err;
246
247 if (cause & CAUSEF_BD) {
248 err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
249 &vcpu->arch.pc);
250 if (err)
251 return EMULATE_FAIL;
252 } else {
253 vcpu->arch.pc += 4;
254 }
255
256 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
257
258 return EMULATE_DONE;
259}

/**
 * kvm_get_badinstr() - Get bad instruction encoding.
 * @opc:	Guest pointer to faulting instruction.
 * @vcpu:	Virtual CPU.
 * @out:	Output instruction encoding.
 *
 * Gets the instruction encoding of the faulting instruction from the host
 * CP0_BadInstr value saved on guest exit.
 *
 * Returns:	0 on success, or -EINVAL if the CPU does not implement the
 *		BadInstr register.
 */
272int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
273{
274 if (cpu_has_badinstr) {
275 *out = vcpu->arch.host_cp0_badinstr;
276 return 0;
277 } else {
278 WARN_ONCE(1, "CPU doesn't have BadInstr register\n");
279 return -EINVAL;
280 }
281}

/**
 * kvm_get_badinstrp() - Get bad prior instruction encoding.
 * @opc:	Guest pointer to prior faulting instruction.
 * @vcpu:	Virtual CPU.
 * @out:	Output instruction encoding.
 *
 * Gets the instruction encoding of the prior (branch) instruction from the
 * host CP0_BadInstrP value saved on guest exit.
 *
 * Returns:	0 on success, or -EINVAL if the CPU does not implement the
 *		BadInstrP register.
 */
294int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
295{
296 if (cpu_has_badinstrp) {
297 *out = vcpu->arch.host_cp0_badinstrp;
298 return 0;
299 } else {
300 WARN_ONCE(1, "CPU doesn't have BadInstrp register\n");
301 return -EINVAL;
302 }
303}

/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case the CP0_Count timer is running).
 */
313int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
314{
315 struct mips_coproc *cop0 = vcpu->arch.cop0;
316
317 return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
318 (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
319}

/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to convert.
 *
 * Converts a monotonic kernel time into the corresponding guest CP0_Count
 * value (before count_bias is applied), caching the dynamic nanosecond bias
 * in vcpu->arch.count_dyn_bias so the scaling multiplication cannot overflow.
 */
328static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
329{
330 s64 now_ns, periods;
331 u64 delta;
332
333 now_ns = ktime_to_ns(now);
334 delta = now_ns + vcpu->arch.count_dyn_bias;
335
336 if (delta >= vcpu->arch.count_period) {
337
338 periods = div64_s64(now_ns, vcpu->arch.count_period);
339 vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
340
341 delta = now_ns + vcpu->arch.count_dyn_bias;
342 }

 /*
  * The bias adjustment above guarantees delta < count_period, and
  * count_period = (NSEC_PER_SEC << 32) / count_hz, so
  * delta * count_hz < NSEC_PER_SEC << 32 (< 2^62) and the intermediate
  * product below cannot overflow the u64 before the division.
  */
354 return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
355}

/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Returns the ktime to use for CP0_Count calculations: the time the count was
 * stopped (count_resume) if the master disable (count_ctl.DC) is set,
 * otherwise the current monotonic ktime.
 */
367static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
368{
369 if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
370 return vcpu->arch.count_resume;
371
372 return ktime_get();
373}

/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read to.
 *
 * Returns the guest CP0_Count value at time @now assuming the timer is
 * running, and handles the case where a timer interrupt is due but the
 * hrtimer has not fired yet, by queueing the interrupt and pushing the
 * hrtimer forward one period.
 *
 * Returns:	The current guest CP0_Count value.
 */
385static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
386{
387 struct mips_coproc *cop0 = vcpu->arch.cop0;
388 ktime_t expires, threshold;
389 u32 count, compare;
390 int running;
391
392
393 count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
394 compare = kvm_read_c0_guest_compare(cop0);

 /*
  * Find whether CP0_Count has reached the closest timer interrupt. If
  * not, we shouldn't inject it.
  */
400 if ((s32)(count - compare) < 0)
401 return count;

 /*
  * The count value about to be returned has already reached the closest
  * timer interrupt. Check whether it really is a new interrupt by looking
  * at whether the hrtimer is due to expire within a quarter of the count
  * period; if so, queue the interrupt here.
  */
409 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
410 threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
411 if (ktime_before(expires, threshold)) {
 /*
  * Cancel the hrtimer while we handle it so there's no chance of
  * interference with the timeout handler.
  */
416 running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
417
418
419 kvm_mips_callbacks->queue_timer_int(vcpu);

 /*
  * If the hrtimer was running, restart it one full period after the
  * expiry time we read, so that we don't push the expiry back twice.
  */
425 if (running) {
426 expires = ktime_add_ns(expires,
427 vcpu->arch.count_period);
428 hrtimer_start(&vcpu->arch.comparecount_timer, expires,
429 HRTIMER_MODE_ABS);
430 }
431 }
432
433 return count;
434}
435
/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the
 * timer is stopped.
 *
 * Returns:	The current guest CP0_Count value.
 */
445u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
446{
447 struct mips_coproc *cop0 = vcpu->arch.cop0;
448
449
450 if (kvm_mips_count_disabled(vcpu))
451 return kvm_read_c0_guest_count(cop0);
452
453 return kvm_mips_read_count_running(vcpu, ktime_get());
454}
455
/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for the CP0_Count value at the point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. Any interrupt pending at the point of freeze is
 * handled; none is handled after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of new
 * timer parameters.
 *
 * Assumes the timer is not disabled (!kvm_mips_count_disabled(@vcpu)).
 *
 * Returns:	The ktime at the point of freeze.
 */
472ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
473{
474 ktime_t now;
475
476
477 hrtimer_cancel(&vcpu->arch.comparecount_timer);
478 now = ktime_get();
479
480
481 *count = kvm_mips_read_count_running(vcpu, now);
482
483 return now;
484}
485
/**
 * kvm_mips_resume_hrtimer() - Resume the hrtimer, updating the expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at the point of resume.
 * @count:	CP0_Count at the point of resume.
 *
 * Resume the hrtimer, programming it to expire when the guest CP0_Count next
 * matches CP0_Compare, based on the ktime/count pair at the point of resume
 * (typically obtained from kvm_mips_freeze_hrtimer()).
 *
 * Assumes the timer is not disabled (!kvm_mips_count_disabled(@vcpu)).
 */
502static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
503 ktime_t now, u32 count)
504{
505 struct mips_coproc *cop0 = vcpu->arch.cop0;
506 u32 compare;
507 u64 delta;
508 ktime_t expire;
509
510
511 compare = kvm_read_c0_guest_compare(cop0);
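 /*
  * Number of timer ticks until CP0_Count next reaches CP0_Compare, computed
  * so that compare == count yields a full 2^32-tick period rather than zero,
  * then converted to nanoseconds.
  */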
512 delta = (u64)(u32)(compare - count - 1) + 1;
513 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
514 expire = ktime_add_ns(now, delta);
515
516
517 hrtimer_cancel(&vcpu->arch.comparecount_timer);
518 hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
519}
520
/**
 * kvm_mips_restore_hrtimer() - Restore the hrtimer after a gap, updating expiry.
 * @vcpu:	Virtual CPU.
 * @before:	Time before Count was saved, lower bound of drift calculation.
 * @count:	CP0_Count at the point of restore.
 * @min_drift:	Minimum amount of negative drift permitted before correction.
 *
 * Restores the hrtimer from a particular @count, accounting for drift between
 * the ktime captured at @before and the point the count was actually sampled
 * (for example from a hardware guest timer), adjusting count_bias when the
 * drift is outside the permitted range.
 *
 * Assumes the timer is not disabled (!kvm_mips_count_disabled(@vcpu)).
 *
 * Returns:	Amount of correction applied to count_bias due to drift,
 *		0 if no correction was needed.
 */
543int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
544 u32 count, int min_drift)
545{
546 ktime_t now, count_time;
547 u32 now_count, before_count;
548 u64 delta;
549 int drift, ret = 0;
550
551
552 before_count = vcpu->arch.count_bias +
553 kvm_mips_ktime_to_count(vcpu, before);

 /*
  * Detect significantly negative drift, where count is lower than expected.
  * Some negative drift is expected when the count is sampled after @before,
  * and it is harmless to let guest time jump forward a little, within
  * reason. If the drift is too significant, adjust the bias to avoid a big
  * guest time jump.
  */
562 drift = count - before_count;
563 if (drift < min_drift) {
564 count_time = before;
565 vcpu->arch.count_bias += drift;
566 ret = drift;
567 goto resume;
568 }
569
570
571 now = ktime_get();
572 now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);

 /*
  * Detect positive drift, where count is higher than expected, and adjust
  * the bias to avoid guest time going backwards.
  */
578 drift = count - now_count;
579 if (drift > 0) {
580 count_time = now;
581 vcpu->arch.count_bias += drift;
582 ret = drift;
583 goto resume;
584 }
585
586
587 delta = (u64)(u32)(now_count - count);
588 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
589 count_time = ktime_sub_ns(now, delta);
590
591resume:
592
593 kvm_mips_resume_hrtimer(vcpu, count_time, count);
594 return ret;
595}
596
/**
 * kvm_mips_write_count() - Modify the count and update the timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
604void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
605{
606 struct mips_coproc *cop0 = vcpu->arch.cop0;
607 ktime_t now;
608
609
610 now = kvm_mips_count_time(vcpu);
611 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
612
613 if (kvm_mips_count_disabled(vcpu))
 /* The timer has been disabled; just set the static count */
615 kvm_write_c0_guest_count(cop0, count);
616 else
 /* Update the timeout of the running hrtimer */
618 kvm_mips_resume_hrtimer(vcpu, now, count);
619}
620
/**
 * kvm_mips_init_count() - Initialise the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of the timer in Hz.
 *
 * Initialise the timer to the specified frequency, zero it, and set it going
 * if it's enabled.
 */
629void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
630{
631 vcpu->arch.count_hz = count_hz;
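 /*
  * count_period is the time in ns for the 32-bit CP0_Count to wrap; for
  * example at count_hz = 100 MHz this is 2^32 / 10^8, roughly 42.9 seconds.
  */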
632 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
633 vcpu->arch.count_dyn_bias = 0;
634
635
636 kvm_mips_write_count(vcpu, 0);
637}
638
/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of the CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so
 * that CP0_Count remains continuous and no timer interrupt is lost.
 *
 * Returns:	-EINVAL if @count_hz is out of range.
 *		0 on success.
 */
650int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
651{
652 struct mips_coproc *cop0 = vcpu->arch.cop0;
653 int dc;
654 ktime_t now;
655 u32 count;
656
657
658 if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
659 return -EINVAL;
660
661 if (vcpu->arch.count_hz == count_hz)
662 return 0;
663
664
665 dc = kvm_mips_count_disabled(vcpu);
666 if (dc) {
667 now = kvm_mips_count_time(vcpu);
668 count = kvm_read_c0_guest_count(cop0);
669 } else {
670 now = kvm_mips_freeze_hrtimer(vcpu, &count);
671 }
672
673
674 vcpu->arch.count_hz = count_hz;
675 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
676 vcpu->arch.count_dyn_bias = 0;
677
678
679 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
680
681
682 if (!dc)
683 kvm_mips_resume_hrtimer(vcpu, now, count);
684 return 0;
685}
686
/**
 * kvm_mips_write_compare() - Modify compare and update the timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 * @ack:	Whether to acknowledge (clear) the pending timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout, optionally
 * acknowledging the timer interrupt as a CP0_Compare write normally would.
 */
697void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
698{
699 struct mips_coproc *cop0 = vcpu->arch.cop0;
700 int dc;
701 u32 old_compare = kvm_read_c0_guest_compare(cop0);
702 s32 delta = compare - old_compare;
703 u32 cause;
704 ktime_t now = ktime_set(0, 0);
705 u32 count;
706
707
708 if (old_compare == compare) {
709 if (!ack)
710 return;
711 kvm_mips_callbacks->dequeue_timer_int(vcpu);
712 kvm_write_c0_guest_compare(cop0, compare);
713 return;
714 }
715
 /*
  * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted too,
  * to prevent guest CP0_Count hitting guest CP0_Compare.
  *
  * The new GTOffset corresponds to the new value of CP0_Compare, and is set
  * prior to it being written into the guest context. Preemption is disabled
  * until the new value is written, to prevent restore of a GTOffset
  * corresponding to the old CP0_Compare value in the guest context.
  */
725 if (delta > 0) {
726 preempt_disable();
727 write_c0_gtoffset(compare - read_c0_count());
728 back_to_back_c0_hazard();
729 }
730
731
732 dc = kvm_mips_count_disabled(vcpu);
733 if (!dc)
734 now = kvm_mips_freeze_hrtimer(vcpu, &count);
735
736 if (ack)
737 kvm_mips_callbacks->dequeue_timer_int(vcpu);
738 else
 /*
  * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so preserve
  * guest CP0_Cause.TI if we don't want to ack it.
  */
743 cause = kvm_read_c0_guest_cause(cop0);
744
745 kvm_write_c0_guest_compare(cop0, compare);
746
747 if (delta > 0)
748 preempt_enable();
749
750 back_to_back_c0_hazard();
751
752 if (!ack && cause & CAUSEF_TI)
753 kvm_write_c0_guest_cause(cop0, cause);
754
755
756 if (!dc)
757 kvm_mips_resume_hrtimer(vcpu, now, count);
758
 /*
  * If guest CP0_Compare has moved backward, the CP0_GTOffset change is
  * delayed until after the new CP0_Compare has been written above, otherwise
  * the new guest CP0_Count could hit the new CP0_Compare.
  */
764 if (delta <= 0)
765 write_c0_gtoffset(compare - read_c0_count());
766}
767
/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer: cancel the hrtimer and latch the current
 * dynamic count into the static guest CP0_Count.
 *
 * Assumes CP0_Count was previously running but has now been disabled by
 * CP0_Cause.DC or count_ctl.DC being set.
 *
 * Returns:	The ktime at the point of disable.
 */
780static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
781{
782 struct mips_coproc *cop0 = vcpu->arch.cop0;
783 u32 count;
784 ktime_t now;
785
786
787 hrtimer_cancel(&vcpu->arch.comparecount_timer);
788
789
790 now = ktime_get();
791 count = kvm_mips_read_count_running(vcpu, now);
792 kvm_write_c0_guest_count(cop0, count);
793
794 return now;
795}
796
/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer by setting guest CP0_Cause.DC, unless it is
 * already stopped by count_ctl.DC.
 */
807void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
808{
809 struct mips_coproc *cop0 = vcpu->arch.cop0;
810
811 kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
812 if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
813 kvm_mips_count_disable(vcpu);
814}
815
/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer by clearing guest CP0_Cause.DC. The timer
 * resumes from the current static CP0_Count value (unless it is still
 * stopped by count_ctl.DC).
 */
827void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
828{
829 struct mips_coproc *cop0 = vcpu->arch.cop0;
830 u32 count;
831
832 kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
833
 /*
  * Set the dynamic count to match the static count. This starts the hrtimer
  * if count_ctl.DC allows it, otherwise it just updates the biases.
  */
839 count = kvm_read_c0_guest_count(cop0);
840 kvm_mips_write_count(vcpu, count);
841}
842
/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	New count control register value.
 *
 * Set the count control KVM register. Only the master disable bit
 * (KVM_REG_MIPS_COUNT_CTL_DC) may be changed; toggling it stops or resumes
 * the timer while keeping guest CP0_Count consistent.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */
853int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
854{
855 struct mips_coproc *cop0 = vcpu->arch.cop0;
856 s64 changed = count_ctl ^ vcpu->arch.count_ctl;
857 s64 delta;
858 ktime_t expire, now;
859 u32 count, compare;
860
861
862 if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
863 return -EINVAL;
864
865
866 vcpu->arch.count_ctl = count_ctl;
867
868
869 if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
 /* Is CP0_Count also disabled by CP0_Cause.DC? */
871 if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
872 if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
 /* Just record the current time */
874 vcpu->arch.count_resume = ktime_get();
875 } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
 /* Disable the timer and record the current time */
877 vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
878 } else {
 /*
  * Calculate the timeout relative to the static count at the resume time
  * (wrapping 0 to a full 2^32 period).
  */
883 count = kvm_read_c0_guest_count(cop0);
884 compare = kvm_read_c0_guest_compare(cop0);
885 delta = (u64)(u32)(compare - count - 1) + 1;
886 delta = div_u64(delta * NSEC_PER_SEC,
887 vcpu->arch.count_hz);
888 expire = ktime_add_ns(vcpu->arch.count_resume, delta);
889
890
891 now = ktime_get();
892 if (ktime_compare(now, expire) >= 0)
 /* The timeout expired while disabled; queue the timer interrupt now */
894 kvm_mips_callbacks->queue_timer_int(vcpu);
895
896
897 count = kvm_mips_read_count_running(vcpu, now);
898 kvm_mips_resume_hrtimer(vcpu, now, count);
899 }
900 }
901
902 return 0;
903}
904
/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	New count resume register value.
 *
 * Set the count resume KVM register, i.e. the monotonic nanosecond time at
 * which the master disable last took effect.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 *		0 on success.
 */
915int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
916{
 /*
  * It doesn't make sense for the resume time to be in the future, as it
  * would be counting up the wrong way.
  */
922 if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
923 return -EINVAL;
924
925 vcpu->arch.count_resume = ns_to_ktime(count_resume);
926 return 0;
927}
928
/**
 * kvm_mips_count_timeout() - Push the timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the expiry forward one count period.
 *
 * Returns:	Whether the hrtimer should be restarted (always HRTIMER_RESTART).
 */
937enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
938{
 /* Add the Count period to the current expiry time */
940 hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
941 vcpu->arch.count_period);
942 return HRTIMER_RESTART;
943}
944
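/*
 * kvm_mips_emul_wait() - Emulate the guest WAIT instruction.
 * @vcpu:	Virtual CPU.
 *
 * Block the vCPU until an interrupt or other wake-up arrives, unless an
 * exception is already pending. On KVM_REQ_UNHALT the exit reason is set to
 * KVM_EXIT_IRQ_WINDOW_OPEN so userspace can check for pending I/O.
 */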
945enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
946{
947 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
948 vcpu->arch.pending_exceptions);
949
950 ++vcpu->stat.wait_exits;
951 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
952 if (!vcpu->arch.pending_exceptions) {
953 kvm_vz_lose_htimer(vcpu);
954 vcpu->arch.wait = 1;
955 kvm_vcpu_block(vcpu);
956
 /*
  * If we are runnable again (e.g. an interrupt is now pending), return to
  * userspace with KVM_EXIT_IRQ_WINDOW_OPEN so pending I/O can be checked.
  */
961 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
962 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
963 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
964 }
965 }
966
967 return EMULATE_DONE;
968}
969
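/*
 * kvm_mips_emulate_store() - Emulate a guest store to an MMIO address.
 * @inst:	Store instruction to emulate.
 * @cause:	CP0_Cause of the faulting instruction.
 * @vcpu:	Virtual CPU.
 *
 * Translates the faulting guest virtual address, packs the source register
 * into run->mmio according to the store width and alignment, and either
 * completes the access on the in-kernel MMIO bus or returns EMULATE_DO_MMIO
 * so userspace can handle it. On failure the guest PC is rolled back.
 */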
970enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
971 u32 cause,
972 struct kvm_vcpu *vcpu)
973{
974 int r;
975 enum emulation_result er;
976 u32 rt;
977 struct kvm_run *run = vcpu->run;
978 void *data = run->mmio.data;
979 unsigned int imme;
980 unsigned long curr_pc;
981
 /*
  * Update the PC, but hold onto the current PC in case there is an error
  * and we want to roll it back.
  */
986 curr_pc = vcpu->arch.pc;
987 er = update_pc(vcpu, cause);
988 if (er == EMULATE_FAIL)
989 return er;
990
991 rt = inst.i_format.rt;
992
993 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
994 vcpu->arch.host_cp0_badvaddr);
995 if (run->mmio.phys_addr == KVM_INVALID_ADDR)
996 goto out_fail;
997
998 switch (inst.i_format.opcode) {
999#if defined(CONFIG_64BIT)
1000 case sd_op:
1001 run->mmio.len = 8;
1002 *(u64 *)data = vcpu->arch.gprs[rt];
1003
1004 kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
1005 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1006 vcpu->arch.gprs[rt], *(u64 *)data);
1007 break;
1008#endif
1009
1010 case sw_op:
1011 run->mmio.len = 4;
1012 *(u32 *)data = vcpu->arch.gprs[rt];
1013
1014 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1015 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1016 vcpu->arch.gprs[rt], *(u32 *)data);
1017 break;
1018
1019 case sh_op:
1020 run->mmio.len = 2;
1021 *(u16 *)data = vcpu->arch.gprs[rt];
1022
1023 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1024 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1025 vcpu->arch.gprs[rt], *(u16 *)data);
1026 break;
1027
1028 case sb_op:
1029 run->mmio.len = 1;
1030 *(u8 *)data = vcpu->arch.gprs[rt];
1031
1032 kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1033 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1034 vcpu->arch.gprs[rt], *(u8 *)data);
1035 break;
1036
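 /*
  * Unaligned stores (SWL/SWR, and SDL/SDR on 64-bit): the access is aligned
  * down to the containing word/doubleword and the source register is shifted
  * into the byte lanes selected by the low address bits, then written out as
  * a full-width MMIO store.
  */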
1037 case swl_op:
1038 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1039 vcpu->arch.host_cp0_badvaddr) & (~0x3);
1040 run->mmio.len = 4;
1041 imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1042 switch (imme) {
1043 case 0:
1044 *(u32 *)data = ((*(u32 *)data) & 0xffffff00) |
1045 (vcpu->arch.gprs[rt] >> 24);
1046 break;
1047 case 1:
1048 *(u32 *)data = ((*(u32 *)data) & 0xffff0000) |
1049 (vcpu->arch.gprs[rt] >> 16);
1050 break;
1051 case 2:
1052 *(u32 *)data = ((*(u32 *)data) & 0xff000000) |
1053 (vcpu->arch.gprs[rt] >> 8);
1054 break;
1055 case 3:
1056 *(u32 *)data = vcpu->arch.gprs[rt];
1057 break;
1058 default:
1059 break;
1060 }
1061
1062 kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1063 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1064 vcpu->arch.gprs[rt], *(u32 *)data);
1065 break;
1066
1067 case swr_op:
1068 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1069 vcpu->arch.host_cp0_badvaddr) & (~0x3);
1070 run->mmio.len = 4;
1071 imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1072 switch (imme) {
1073 case 0:
1074 *(u32 *)data = vcpu->arch.gprs[rt];
1075 break;
1076 case 1:
1077 *(u32 *)data = ((*(u32 *)data) & 0xff) |
1078 (vcpu->arch.gprs[rt] << 8);
1079 break;
1080 case 2:
1081 *(u32 *)data = ((*(u32 *)data) & 0xffff) |
1082 (vcpu->arch.gprs[rt] << 16);
1083 break;
1084 case 3:
1085 *(u32 *)data = ((*(u32 *)data) & 0xffffff) |
1086 (vcpu->arch.gprs[rt] << 24);
1087 break;
1088 default:
1089 break;
1090 }
1091
1092 kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1093 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1094 vcpu->arch.gprs[rt], *(u32 *)data);
1095 break;
1096
1097#if defined(CONFIG_64BIT)
1098 case sdl_op:
1099 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1100 vcpu->arch.host_cp0_badvaddr) & (~0x7);
1101
1102 run->mmio.len = 8;
1103 imme = vcpu->arch.host_cp0_badvaddr & 0x7;
1104 switch (imme) {
1105 case 0:
1106 *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) |
1107 ((vcpu->arch.gprs[rt] >> 56) & 0xff);
1108 break;
1109 case 1:
1110 *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) |
1111 ((vcpu->arch.gprs[rt] >> 48) & 0xffff);
1112 break;
1113 case 2:
1114 *(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) |
1115 ((vcpu->arch.gprs[rt] >> 40) & 0xffffff);
1116 break;
1117 case 3:
1118 *(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) |
1119 ((vcpu->arch.gprs[rt] >> 32) & 0xffffffff);
1120 break;
1121 case 4:
1122 *(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) |
1123 ((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff);
1124 break;
1125 case 5:
1126 *(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) |
1127 ((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff);
1128 break;
1129 case 6:
1130 *(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) |
1131 ((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff);
1132 break;
1133 case 7:
1134 *(u64 *)data = vcpu->arch.gprs[rt];
1135 break;
1136 default:
1137 break;
1138 }
1139
1140 kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n",
1141 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1142 vcpu->arch.gprs[rt], *(u64 *)data);
1143 break;
1144
1145 case sdr_op:
1146 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1147 vcpu->arch.host_cp0_badvaddr) & (~0x7);
1148
1149 run->mmio.len = 8;
1150 imme = vcpu->arch.host_cp0_badvaddr & 0x7;
1151 switch (imme) {
1152 case 0:
1153 *(u64 *)data = vcpu->arch.gprs[rt];
1154 break;
1155 case 1:
1156 *(u64 *)data = ((*(u64 *)data) & 0xff) |
1157 (vcpu->arch.gprs[rt] << 8);
1158 break;
1159 case 2:
1160 *(u64 *)data = ((*(u64 *)data) & 0xffff) |
1161 (vcpu->arch.gprs[rt] << 16);
1162 break;
1163 case 3:
1164 *(u64 *)data = ((*(u64 *)data) & 0xffffff) |
1165 (vcpu->arch.gprs[rt] << 24);
1166 break;
1167 case 4:
1168 *(u64 *)data = ((*(u64 *)data) & 0xffffffff) |
1169 (vcpu->arch.gprs[rt] << 32);
1170 break;
1171 case 5:
1172 *(u64 *)data = ((*(u64 *)data) & 0xffffffffff) |
1173 (vcpu->arch.gprs[rt] << 40);
1174 break;
1175 case 6:
1176 *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) |
1177 (vcpu->arch.gprs[rt] << 48);
1178 break;
1179 case 7:
1180 *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) |
1181 (vcpu->arch.gprs[rt] << 56);
1182 break;
1183 default:
1184 break;
1185 }
1186
1187 kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n",
1188 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1189 vcpu->arch.gprs[rt], *(u64 *)data);
1190 break;
1191#endif
1192
1193#ifdef CONFIG_CPU_LOONGSON64
1194 case sdc2_op:
1195 rt = inst.loongson3_lsdc2_format.rt;
1196 switch (inst.loongson3_lsdc2_format.opcode1) {
 /*
  * Loongson-3 overridden sdc2 instructions.
  * opcode1		instruction
  *   0x0		gssbx: store one byte from GPR
  *   0x1		gsshx: store two bytes from GPR
  *   0x2		gsswx: store four bytes from GPR
  *   0x3		gssdx: store eight bytes from GPR
  */
1205 case 0x0:
1206 run->mmio.len = 1;
1207 *(u8 *)data = vcpu->arch.gprs[rt];
1208
1209 kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1210 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1211 vcpu->arch.gprs[rt], *(u8 *)data);
1212 break;
1213 case 0x1:
1214 run->mmio.len = 2;
1215 *(u16 *)data = vcpu->arch.gprs[rt];
1216
1217 kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1218 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1219 vcpu->arch.gprs[rt], *(u16 *)data);
1220 break;
1221 case 0x2:
1222 run->mmio.len = 4;
1223 *(u32 *)data = vcpu->arch.gprs[rt];
1224
1225 kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1226 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1227 vcpu->arch.gprs[rt], *(u32 *)data);
1228 break;
1229 case 0x3:
1230 run->mmio.len = 8;
1231 *(u64 *)data = vcpu->arch.gprs[rt];
1232
1233 kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
1234 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1235 vcpu->arch.gprs[rt], *(u64 *)data);
1236 break;
1237 default:
1238 kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n",
1239 inst.word);
1240 break;
1241 }
1242 break;
1243#endif
1244 default:
1245 kvm_err("Store not yet supported (inst=0x%08x)\n",
1246 inst.word);
1247 goto out_fail;
1248 }
1249
1250 vcpu->mmio_needed = 1;
1251 run->mmio.is_write = 1;
1252 vcpu->mmio_is_write = 1;
1253
1254 r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
1255 run->mmio.phys_addr, run->mmio.len, data);
1256
1257 if (!r) {
1258 vcpu->mmio_needed = 0;
1259 return EMULATE_DONE;
1260 }
1261
1262 return EMULATE_DO_MMIO;
1263
1264out_fail:
1265
1266 vcpu->arch.pc = curr_pc;
1267 return EMULATE_FAIL;
1268}
1269
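/*
 * kvm_mips_emulate_load() - Emulate a guest load from an MMIO address.
 * @inst:	Load instruction to emulate.
 * @cause:	CP0_Cause of the faulting instruction.
 * @vcpu:	Virtual CPU.
 *
 * Sets up run->mmio for the access and records the destination register and
 * resume PC; the loaded data is written back to the guest register later by
 * kvm_mips_complete_mmio_load().
 */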
1270enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
1271 u32 cause, struct kvm_vcpu *vcpu)
1272{
1273 struct kvm_run *run = vcpu->run;
1274 int r;
1275 enum emulation_result er;
1276 unsigned long curr_pc;
1277 u32 op, rt;
1278 unsigned int imme;
1279
1280 rt = inst.i_format.rt;
1281 op = inst.i_format.opcode;
1282
 /*
  * Find the resume PC now, while we have safe and easy access to the prior
  * branch instruction, and save it for kvm_mips_complete_mmio_load() to
  * restore later.
  */
1288 curr_pc = vcpu->arch.pc;
1289 er = update_pc(vcpu, cause);
1290 if (er == EMULATE_FAIL)
1291 return er;
1292 vcpu->arch.io_pc = vcpu->arch.pc;
1293 vcpu->arch.pc = curr_pc;
1294
1295 vcpu->arch.io_gpr = rt;
1296
1297 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1298 vcpu->arch.host_cp0_badvaddr);
1299 if (run->mmio.phys_addr == KVM_INVALID_ADDR)
1300 return EMULATE_FAIL;
1301
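 /*
  * vcpu->mmio_needed records how to place the MMIO data into the destination
  * GPR once the load completes: 1 = zero-extend, 2 = sign-extend, and larger
  * values identify the specific LWL/LWR/LDL/LDR (and Loongson GS) variant
  * and alignment, decoded in kvm_mips_complete_mmio_load().
  */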
1302 vcpu->mmio_needed = 2;
1303 switch (op) {
1304#if defined(CONFIG_64BIT)
1305 case ld_op:
1306 run->mmio.len = 8;
1307 break;
1308
1309 case lwu_op:
1310 vcpu->mmio_needed = 1;
1311 fallthrough;
1312#endif
1313 case lw_op:
1314 run->mmio.len = 4;
1315 break;
1316
1317 case lhu_op:
1318 vcpu->mmio_needed = 1;
1319 fallthrough;
1320 case lh_op:
1321 run->mmio.len = 2;
1322 break;
1323
1324 case lbu_op:
1325 vcpu->mmio_needed = 1;
1326 fallthrough;
1327 case lb_op:
1328 run->mmio.len = 1;
1329 break;
1330
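 /*
  * Unaligned loads (LWL/LWR, and LDL/LDR on 64-bit): read the full aligned
  * word/doubleword and let kvm_mips_complete_mmio_load() merge the relevant
  * byte lanes into the destination register, as selected by mmio_needed.
  */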
1331 case lwl_op:
1332 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1333 vcpu->arch.host_cp0_badvaddr) & (~0x3);
1334
1335 run->mmio.len = 4;
1336 imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1337 switch (imme) {
1338 case 0:
1339 vcpu->mmio_needed = 3;
1340 break;
1341 case 1:
1342 vcpu->mmio_needed = 4;
1343 break;
1344 case 2:
1345 vcpu->mmio_needed = 5;
1346 break;
1347 case 3:
1348 vcpu->mmio_needed = 6;
1349 break;
1350 default:
1351 break;
1352 }
1353 break;
1354
1355 case lwr_op:
1356 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1357 vcpu->arch.host_cp0_badvaddr) & (~0x3);
1358
1359 run->mmio.len = 4;
1360 imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1361 switch (imme) {
1362 case 0:
1363 vcpu->mmio_needed = 7;
1364 break;
1365 case 1:
1366 vcpu->mmio_needed = 8;
1367 break;
1368 case 2:
1369 vcpu->mmio_needed = 9;
1370 break;
1371 case 3:
1372 vcpu->mmio_needed = 10;
1373 break;
1374 default:
1375 break;
1376 }
1377 break;
1378
1379#if defined(CONFIG_64BIT)
1380 case ldl_op:
1381 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1382 vcpu->arch.host_cp0_badvaddr) & (~0x7);
1383
1384 run->mmio.len = 8;
1385 imme = vcpu->arch.host_cp0_badvaddr & 0x7;
1386 switch (imme) {
1387 case 0:
1388 vcpu->mmio_needed = 11;
1389 break;
1390 case 1:
1391 vcpu->mmio_needed = 12;
1392 break;
1393 case 2:
1394 vcpu->mmio_needed = 13;
1395 break;
1396 case 3:
1397 vcpu->mmio_needed = 14;
1398 break;
1399 case 4:
1400 vcpu->mmio_needed = 15;
1401 break;
1402 case 5:
1403 vcpu->mmio_needed = 16;
1404 break;
1405 case 6:
1406 vcpu->mmio_needed = 17;
1407 break;
1408 case 7:
1409 vcpu->mmio_needed = 18;
1410 break;
1411 default:
1412 break;
1413 }
1414 break;
1415
1416 case ldr_op:
1417 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1418 vcpu->arch.host_cp0_badvaddr) & (~0x7);
1419
1420 run->mmio.len = 8;
1421 imme = vcpu->arch.host_cp0_badvaddr & 0x7;
1422 switch (imme) {
1423 case 0:
1424 vcpu->mmio_needed = 19;
1425 break;
1426 case 1:
1427 vcpu->mmio_needed = 20;
1428 break;
1429 case 2:
1430 vcpu->mmio_needed = 21;
1431 break;
1432 case 3:
1433 vcpu->mmio_needed = 22;
1434 break;
1435 case 4:
1436 vcpu->mmio_needed = 23;
1437 break;
1438 case 5:
1439 vcpu->mmio_needed = 24;
1440 break;
1441 case 6:
1442 vcpu->mmio_needed = 25;
1443 break;
1444 case 7:
1445 vcpu->mmio_needed = 26;
1446 break;
1447 default:
1448 break;
1449 }
1450 break;
1451#endif
1452
1453#ifdef CONFIG_CPU_LOONGSON64
1454 case ldc2_op:
1455 rt = inst.loongson3_lsdc2_format.rt;
1456 switch (inst.loongson3_lsdc2_format.opcode1) {
 /*
  * Loongson-3 overridden ldc2 instructions.
  * opcode1		instruction
  *   0x0		gslbx: load one byte to GPR
  *   0x1		gslhx: load two bytes to GPR
  *   0x2		gslwx: load four bytes to GPR
  *   0x3		gsldx: load eight bytes to GPR
  */
1465 case 0x0:
1466 run->mmio.len = 1;
1467 vcpu->mmio_needed = 27;
1468 break;
1469 case 0x1:
1470 run->mmio.len = 2;
1471 vcpu->mmio_needed = 28;
1472 break;
1473 case 0x2:
1474 run->mmio.len = 4;
1475 vcpu->mmio_needed = 29;
1476 break;
1477 case 0x3:
1478 run->mmio.len = 8;
1479 vcpu->mmio_needed = 30;
1480 break;
1481 default:
1482 kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n",
1483 inst.word);
1484 break;
1485 }
1486 break;
1487#endif
1488
1489 default:
1490 kvm_err("Load not yet supported (inst=0x%08x)\n",
1491 inst.word);
1492 vcpu->mmio_needed = 0;
1493 return EMULATE_FAIL;
1494 }
1495
1496 run->mmio.is_write = 0;
1497 vcpu->mmio_is_write = 0;
1498
1499 r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
1500 run->mmio.phys_addr, run->mmio.len, run->mmio.data);
1501
1502 if (!r) {
1503 kvm_mips_complete_mmio_load(vcpu);
1504 vcpu->mmio_needed = 0;
1505 return EMULATE_DONE;
1506 }
1507
1508 return EMULATE_DO_MMIO;
1509}
1510
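/*
 * kvm_mips_complete_mmio_load() - Finish an emulated MMIO load.
 * @vcpu:	Virtual CPU.
 *
 * Writes the MMIO data back into the destination register, applying the
 * sign/zero extension or partial-register merge selected by
 * vcpu->mmio_needed, and restores the saved resume PC.
 */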
1511enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
1512{
1513 struct kvm_run *run = vcpu->run;
1514 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
1515 enum emulation_result er = EMULATE_DONE;
1516
1517 if (run->mmio.len > sizeof(*gpr)) {
1518 kvm_err("Bad MMIO length: %d", run->mmio.len);
1519 er = EMULATE_FAIL;
1520 goto done;
1521 }
1522
 /* Restore the saved resume PC */
1524 vcpu->arch.pc = vcpu->arch.io_pc;
1525
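 /*
  * The extension/merge behaviour below is selected by the vcpu->mmio_needed
  * value recorded when the load was emulated.
  */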
1526 switch (run->mmio.len) {
1527 case 8:
1528 switch (vcpu->mmio_needed) {
1529 case 11:
1530 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) |
1531 (((*(s64 *)run->mmio.data) & 0xff) << 56);
1532 break;
1533 case 12:
1534 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) |
1535 (((*(s64 *)run->mmio.data) & 0xffff) << 48);
1536 break;
1537 case 13:
1538 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) |
1539 (((*(s64 *)run->mmio.data) & 0xffffff) << 40);
1540 break;
1541 case 14:
1542 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) |
1543 (((*(s64 *)run->mmio.data) & 0xffffffff) << 32);
1544 break;
1545 case 15:
1546 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
1547 (((*(s64 *)run->mmio.data) & 0xffffffffff) << 24);
1548 break;
1549 case 16:
1550 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
1551 (((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16);
1552 break;
1553 case 17:
1554 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
1555 (((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8);
1556 break;
1557 case 18:
1558 case 19:
1559 *gpr = *(s64 *)run->mmio.data;
1560 break;
1561 case 20:
1562 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) |
1563 ((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff);
1564 break;
1565 case 21:
1566 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) |
1567 ((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff);
1568 break;
1569 case 22:
1570 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) |
1571 ((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff);
1572 break;
1573 case 23:
1574 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) |
1575 ((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff);
1576 break;
1577 case 24:
1578 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) |
1579 ((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff);
1580 break;
1581 case 25:
1582 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) |
1583 ((((*(s64 *)run->mmio.data)) >> 48) & 0xffff);
1584 break;
1585 case 26:
1586 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) |
1587 ((((*(s64 *)run->mmio.data)) >> 56) & 0xff);
1588 break;
1589 default:
1590 *gpr = *(s64 *)run->mmio.data;
1591 }
1592 break;
1593
1594 case 4:
1595 switch (vcpu->mmio_needed) {
1596 case 1:
1597 *gpr = *(u32 *)run->mmio.data;
1598 break;
1599 case 2:
1600 *gpr = *(s32 *)run->mmio.data;
1601 break;
1602 case 3:
1603 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
1604 (((*(s32 *)run->mmio.data) & 0xff) << 24);
1605 break;
1606 case 4:
1607 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
1608 (((*(s32 *)run->mmio.data) & 0xffff) << 16);
1609 break;
1610 case 5:
1611 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
1612 (((*(s32 *)run->mmio.data) & 0xffffff) << 8);
1613 break;
1614 case 6:
1615 case 7:
1616 *gpr = *(s32 *)run->mmio.data;
1617 break;
1618 case 8:
1619 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) |
1620 ((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff);
1621 break;
1622 case 9:
1623 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) |
1624 ((((*(s32 *)run->mmio.data)) >> 16) & 0xffff);
1625 break;
1626 case 10:
1627 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) |
1628 ((((*(s32 *)run->mmio.data)) >> 24) & 0xff);
1629 break;
1630 default:
1631 *gpr = *(s32 *)run->mmio.data;
1632 }
1633 break;
1634
1635 case 2:
1636 if (vcpu->mmio_needed == 1)
1637 *gpr = *(u16 *)run->mmio.data;
1638 else
1639 *gpr = *(s16 *)run->mmio.data;
1640
1641 break;
1642 case 1:
1643 if (vcpu->mmio_needed == 1)
1644 *gpr = *(u8 *)run->mmio.data;
1645 else
1646 *gpr = *(s8 *)run->mmio.data;
1647 break;
1648 }
1649
1650done:
1651 return er;
1652}
1653