/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Support for hardware virtualization extensions
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Yann Le Du <ledu@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/hazards.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#ifdef CONFIG_CPU_LOONGSON64
#include "loongson_regs.h"
#endif

#include "trace.h"

/* Pointers to last VCPU loaded on each physical CPU */
static struct kvm_vcpu *last_vcpu[NR_CPUS];
/* Pointers to last VCPU executed on each physical CPU */
static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];

/*
 * Number of guest VTLB entries to use, so we can catch inconsistency between
 * CPUs.
 */
static unsigned int kvm_vz_guest_vtlb_size;

static inline long kvm_vz_read_gc0_ebase(void)
{
	if (sizeof(long) == 8 && cpu_has_ebase_wg)
		return read_gc0_ebase_64();
	else
		return read_gc0_ebase();
}

static inline void kvm_vz_write_gc0_ebase(long v)
{
	/*
	 * First write with WG=1 to write upper bits, then write again in case
	 * WG should be left at 0.
	 * write_gc0_ebase_64() is no longer UNDEFINED since R6.
	 */
	if (sizeof(long) == 8 &&
	    (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
		write_gc0_ebase_64(v | MIPS_EBASE_WG);
		write_gc0_ebase_64(v);
	} else {
		write_gc0_ebase(v | MIPS_EBASE_WG);
		write_gc0_ebase(v);
	}
}

/*
 * These Config bits may be writable by the guest:
 * Config:	[K23, KU] (!TLB), K0
 * Config1:	(none)
 * Config2:	[TU, SU] (impl)
 * Config3:	ISAOnExc
 * Config4:	FTLBPageSize
 * Config5:	K, CV, MSAEn, UFE, FRE, SBRI, UFR
 */

static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return CONF_CM_CMASK;
}

static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return MIPS_CONF3_ISA_OE;
}

static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
{
	/* no need to be exact */
	return MIPS_CONF4_VFTLBPAGESIZE;
}

static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_ufr)
			mask |= MIPS_CONF5_UFR;
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
	}

	return mask;
}

static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return LOONGSON_CONF6_INTIMER | LOONGSON_CONF6_EXTIMER;
}

/*
 * VZ optionally allows these additional Config bits to be written by root:
 * Config:	M, [MT]
 * Config1:	M, [MMUSize-1, C2, MD, PC, WR, CA], FP
 * Config2:	M
 * Config3:	M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
 *		VInt, SP, CDMM, MT, SM, TL]
 * Config4:	M, [VTLBSizeExt, MMUSizeExt]
 * Config5:	MRP
 */

static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
		MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
}

static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config6_guest_wrmask(vcpu) |
		LOONGSON_CONF6_SFBEN | LOONGSON_CONF6_FTLBDIS;
}

static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
	/* VZ guest has already converted gva to gpa */
	return gva;
}

static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	clear_bit(priority, &vcpu->arch.pending_exceptions);
	set_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
				   struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr));
}

static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
				     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}

static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
				 u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		set_gc0_cause(C_TI);
		break;

	case MIPS_EXC_INT_IO_1:
	case MIPS_EXC_INT_IO_2:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		if (cpu_has_guestctl2)
			set_c0_guestctl2(irq);
		else
			set_gc0_cause(irq);
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions);
	return 1;
}

static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			       u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		/*
		 * Call to kvm_write_c0_guest_compare() clears Cause.TI in
		 * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
		 * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
		 * supported or if not using GuestCtl2 Hardware Clear.
		 */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	case MIPS_EXC_INT_IO_1:
	case MIPS_EXC_INT_IO_2:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
	return 1;
}

/*
 * VZ guest timer handling.
 */

/**
 * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	true if the VZ GTOffset & real guest CP0_Count should be used
 *		instead of software emulation of guest timer.
 *		false otherwise.
 */
static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
{
	if (kvm_mips_count_disabled(vcpu))
		return false;

	/* Chosen frequency must match real frequency */
	if (mips_hpt_frequency != vcpu->arch.count_hz)
		return false;

	/* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
	if (current_cpu_data.gtoffset_mask != 0xffffffff)
		return false;

	return true;
}

/**
 * _kvm_vz_restore_stimer() - Restore soft timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore VZ state relating to the soft timer. The hard timer can be enabled
 * later.
 */
static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
				   u32 cause)
{
	/*
	 * Avoid spurious counter interrupts by setting Guest CP0_Count to just
	 * after the end of the timeslice.
	 */
	write_c0_gtoffset(compare - read_c0_count());

	back_to_back_c0_hazard();
	write_gc0_cause(cause);
}

/**
 * _kvm_vz_restore_htimer() - Restore hard timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
 * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
 */
static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
				   u32 compare, u32 cause)
{
	u32 start_count, after_count;
	ktime_t freeze_time;
	unsigned long flags;

	/*
	 * Freeze the soft-timer and sync the guest CP0_Count with it. We do
	 * this with interrupts disabled to avoid latency.
	 */
	local_irq_save(flags);
	freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count);
	write_c0_gtoffset(start_count - read_c0_count());
	local_irq_restore(flags);

	/* restore guest CP0_Cause, as TI may already be set */
	back_to_back_c0_hazard();
	write_gc0_cause(cause);

	/*
	 * The above sequence isn't atomic and would result in lost timer
	 * interrupts if we're not careful. Detect if a timer interrupt is due
	 * and assert it.
	 */
	back_to_back_c0_hazard();
	after_count = read_gc0_count();
	if (after_count - start_count > compare - start_count - 1)
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

/**
 * kvm_vz_restore_timer() - Restore timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore soft timer state from saved context.
 */
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 cause, compare;

	compare = kvm_read_sw_gc0_compare(cop0);
	cause = kvm_read_sw_gc0_cause(cop0);

	write_gc0_compare(compare);
	_kvm_vz_restore_stimer(vcpu, compare, cause);
}

/**
 * kvm_vz_acquire_htimer() - Switch to hard timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore hard timer state on top of existing soft timer state if possible.
 *
 * Since hard timer won't remain active over preemption, preemption should be
 * disabled by the caller.
 */
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
{
	u32 gctl0;

	gctl0 = read_c0_guestctl0();
	if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
		/* enable guest access to hard timer */
		write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);

		_kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
				       read_gc0_cause());
	}
}

/**
 * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
 * @vcpu:	Virtual CPU.
 * @out_compare: Pointer to write compare value to.
 * @out_cause:	Pointer to write cause value to.
 *
 * Save VZ guest timer state and switch to software emulation of guest CP0
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 */
static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
				u32 *out_compare, u32 *out_cause)
{
	u32 cause, compare, before_count, end_count;
	ktime_t before_time;

	compare = read_gc0_compare();
	*out_compare = compare;

	before_time = ktime_get();

	/*
	 * Record the CP0_Count *before* reading CP0_Cause, so a timer
	 * interrupt becoming pending between the two reads is still noticed.
	 */
	before_count = read_gc0_count();
	back_to_back_c0_hazard();
	cause = read_gc0_cause();
	*out_cause = cause;

	/*
	 * Record a final CP0_Count which we will transfer to the soft-timer.
	 * This is recorded *after* saving CP0_Cause, so we don't get any timer
	 * interrupts from the hard timer between capturing CP0_Count and
	 * CP0_Cause.
	 */
	back_to_back_c0_hazard();
	end_count = read_gc0_count();

	/*
	 * The above sequence isn't atomic, so we could miss a timer interrupt
	 * between reading CP0_Cause and end_count. Detect and record any timer
	 * interrupt due between before_count and end_count.
	 */
	if (end_count - before_count > compare - before_count - 1)
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);

	/*
	 * Restore soft-timer, ignoring a small amount of negative drift due to
	 * delay between freeze_hrtimer and setting CP0_GTOffset.
	 */
	kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
}

/**
 * kvm_vz_save_timer() - Save guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Save VZ guest timer state and switch to soft guest timer if hard timer was
 * in use.
 */
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 gctl0, compare, cause;

	gctl0 = read_c0_guestctl0();
	if (gctl0 & MIPS_GCTL0_GT) {
		/* disable guest use of hard timer */
		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

		/* save hard timer state */
		_kvm_vz_save_htimer(vcpu, &compare, &cause);
	} else {
		compare = read_gc0_compare();
		cause = read_gc0_cause();
	}

	/* save timer-related state to VCPU context */
	kvm_write_sw_gc0_cause(cop0, cause);
	kvm_write_sw_gc0_compare(cop0, compare);
}

/**
 * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
 * @vcpu:	Virtual CPU.
 *
 * Transfers the state of the hard guest timer to the soft guest timer, leaving
 * guest state intact so it can continue to be used with the soft timer.
 */
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
{
	u32 gctl0, compare, cause;

	preempt_disable();
	gctl0 = read_c0_guestctl0();
	if (gctl0 & MIPS_GCTL0_GT) {
		/* disable guest use of timer */
		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

		/* save hard timer state */
		_kvm_vz_save_htimer(vcpu, &compare, &cause);

		/* restore soft timer state */
		_kvm_vz_restore_stimer(vcpu, compare, cause);
	}
	preempt_enable();
}

/**
 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
 * @inst:	32-bit instruction encoding.
 *
 * Finds whether @inst encodes an EVA memory access instruction, which would
 * indicate that emulation of it should access the user mode address space
 * instead of the kernel mode address space. This matters for MUSUK segments
 * which are TLB mapped for user mode but unmapped for kernel mode.
 *
 * Returns:	Whether @inst encodes an EVA accessor instruction.
 */
static bool is_eva_access(union mips_instruction inst)
{
	if (inst.spec3_format.opcode != spec3_op)
		return false;

	switch (inst.spec3_format.func) {
	case lwle_op:
	case lwre_op:
	case cachee_op:
	case sbe_op:
	case she_op:
	case sce_op:
	case swe_op:
	case swle_op:
	case swre_op:
	case prefe_op:
	case lbue_op:
	case lhue_op:
	case lbe_op:
	case lhe_op:
	case lle_op:
	case lwe_op:
		return true;
	default:
		return false;
	}
}

/**
 * is_eva_am_mapped() - Find whether an access mode is mapped.
 * @vcpu:	KVM VCPU state.
 * @am:		3-bit encoded access mode.
 * @eu:		Segment becomes unmapped and uncached when Status.ERL=1.
 *
 * Decode @am to find whether it encodes a mapped segment for the current VCPU
 * state. Where necessary @eu and the actual instruction causing the fault are
 * taken into account to make the decision.
 *
 * Returns:	Whether the VCPU faulted on a TLB mapped address.
 */
static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
{
	u32 am_lookup;
	int err;

	/*
	 * Interpret access control mode. We assume address errors will already
	 * have been caught by the guest, leaving us with:
	 *      AM      UM  SM  KM  31..24 23..16
	 * UK    0 000          Unm   0      0
	 * MK    1 001          TLB   1
	 * MSK   2 010      TLB TLB   1
	 * MUSK  3 011  TLB TLB TLB   1
	 * MUSUK 4 100  TLB TLB Unm   0      1
	 * USK   5 101      Unm Unm   0
	 * -     6 110                0      0
	 * UUSK  7 111  Unm Unm Unm   0      0
	 *
	 * We shift a magic value by AM across the sign bit to find if always
	 * TLB mapped, and if not shift by 8 again to find if it depends on KM.
	 */
	am_lookup = 0x70080000 << am;
	if ((s32)am_lookup < 0) {
		/*
		 * MK, MSK, MUSK
		 * Always TLB mapped, unless SegCtl.EU && ERL
		 */
		if (!eu || !(read_gc0_status() & ST0_ERL))
			return true;
	} else {
		am_lookup <<= 8;
		if ((s32)am_lookup < 0) {
			union mips_instruction inst;
			unsigned int status;
			u32 *opc;

			/*
			 * MUSUK
			 * TLB mapped if not in kernel mode
			 */
			status = read_gc0_status();
			if (!(status & (ST0_EXL | ST0_ERL)) &&
			    (status & ST0_KSU))
				return true;
			/*
			 * EVA access instructions in kernel
			 * mode access user address space.
			 */
			opc = (u32 *)vcpu->arch.pc;
			if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
				opc += 1;
			err = kvm_get_badinstr(opc, vcpu, &inst.word);
			if (!err && is_eva_access(inst))
				return true;
		}
	}

	return false;
}

/**
 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
 * @vcpu:	KVM VCPU state.
 * @gva:	Guest virtual address to convert.
 * @gpa:	Output guest physical address.
 *
 * Convert a guest virtual address (GVA) which is valid according to the guest
 * context, to a guest physical address (GPA).
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			     unsigned long *gpa)
{
	u32 gva32 = gva;
	unsigned long segctl;

	if ((long)gva == (s32)gva32) {
		/* Handle canonical 32-bit virtual address */
		if (cpu_guest_has_segments) {
			unsigned long mask, pa;

			switch (gva32 >> 29) {
			case 0:
			case 1: /* CFG5 (1GB) */
				segctl = read_gc0_segctl2() >> 16;
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 2:
			case 3: /* CFG4 (1GB) */
				segctl = read_gc0_segctl2();
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 4: /* CFG3 (512MB) */
				segctl = read_gc0_segctl1() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 5: /* CFG2 (512MB) */
				segctl = read_gc0_segctl1();
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 6: /* CFG1 (512MB) */
				segctl = read_gc0_segctl0() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 7: /* CFG0 (512MB) */
				segctl = read_gc0_segctl0();
				mask = (unsigned long)0xfe0000000ull;
				break;
			default:
				/*
				 * GCC 4.9 isn't smart enough to figure out
				 * that segctl and mask are always initialised.
				 */
				unreachable();
			}

			if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
					     segctl & 0x0008))
				goto tlb_mapped;

			/* Unmapped, find guest physical address */
			pa = (segctl << 20) & mask;
			pa |= gva32 & ~mask;
			*gpa = pa;
			return 0;
		} else if ((s32)gva32 < (s32)0xc0000000) {
			/* legacy unmapped KSeg0 or KSeg1 */
			*gpa = gva32 & 0x1fffffff;
			return 0;
		}
#ifdef CONFIG_64BIT
	} else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
		/* XKPHYS */
		if (cpu_guest_has_segments) {
			/*
			 * Each of the 8 regions can be overridden by SegCtl2.XR
			 * to use SegCtl1.XAM.
			 */
			segctl = read_gc0_segctl2();
			if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
				segctl = read_gc0_segctl1();
				if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
						     0))
					goto tlb_mapped;
			}
		}

		/*
		 * Traditionally fully unmapped.
		 * Bits 61:59 specify the CCA, which we can just mask off here.
		 * Bits 58:PABITS should be zero, but we shouldn't have got here
		 * if it wasn't.
		 */
		*gpa = gva & 0x07ffffffffffffff;
		return 0;
#endif
	}

tlb_mapped:
	return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
}

/**
 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
 * @vcpu:	KVM VCPU state.
 * @badvaddr:	Root BadVAddr.
 * @gpa:	Output guest physical address.
 *
 * VZ implementations are permitted to report guest virtual addresses (GVA) in
 * BadVAddr on a root exception during guest execution, instead of the more
 * convenient guest physical addresses (GPA). When we get a GVA, this function
 * converts it to a GPA, taking into account guest segmentation and guest TLB
 * state.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
				  unsigned long *gpa)
{
	unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
				 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;

	/* If BadVAddr is GPA, then all is well in the world */
	if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
		*gpa = badvaddr;
		return 0;
	}

	/* Otherwise we would expect it to be GVA ... */
	if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
		 "Unexpected gexccode %#x\n", gexccode))
		return -EINVAL;

	/* ... and we need to perform the GVA->GPA translation in software */
	return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
}

static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 inst = 0;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
		exccode, opc, inst, badvaddr,
		read_gc0_status());
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}

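/* Sanitise a guest MAAR register write before it is stored or applied */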
static unsigned long mips_process_maar(unsigned int op, unsigned long val)
{
	/* Mask off identity bits */
	unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;

	if (read_gc0_pagegrain() & PG_ELPA)
		mask |= 0x00ffffff00000000ull;
	if (cpu_guest_has_mvh)
		mask |= MIPS_MAAR_VH;

	/* Set or clear VH */
	if (op == mtc_op) {
		/* mtc0, clear VH */
		val &= ~MIPS_MAAR_VH;
	} else if (op == dmtc_op) {
		/* dmtc0, set VH to match VL */
		val &= ~MIPS_MAAR_VH;
		if (val & MIPS_MAAR_VL)
			val |= MIPS_MAAR_VH;
	}

	return val & mask;
}

static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	val &= MIPS_MAARI_INDEX;
	if (val == MIPS_MAARI_INDEX)
		kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
	else if (val < ARRAY_SIZE(vcpu->arch.maar))
		kvm_write_sw_gc0_maari(cop0, val);
}

static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
					      u32 *opc, u32 cause,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 rt, rd, sel;
	unsigned long curr_pc;
	unsigned long val;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	if (inst.co_format.co) {
		switch (inst.co_format.func) {
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		default:
			er = EMULATE_FAIL;
		}
	} else {
		rt = inst.c0r_format.rt;
		rd = inst.c0r_format.rd;
		sel = inst.c0r_format.sel;

		switch (inst.c0r_format.rs) {
		case dmfc_op:
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				val = kvm_mips_read_count(vcpu);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				val = read_gc0_compare();
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 0) {		/* LLAddr */
				if (cpu_guest_has_rw_llb)
					val = read_gc0_lladdr() &
						MIPS_LLADDR_LLB;
				else
					val = 0;
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 1 &&		/* MAAR */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				/* MAARI must be in range */
				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
				       ARRAY_SIZE(vcpu->arch.maar));
				val = vcpu->arch.maar[
					kvm_read_sw_gc0_maari(cop0)];
			} else if ((rd == MIPS_CP0_PRID &&
				    (sel == 0 ||	/* PRid */
				     sel == 2 ||	/* EBase */
				     sel == 3)) ||	/* CMGCRBase */
				   (rd == MIPS_CP0_STATUS &&
				    (sel == 2 ||	/* SRSCtl */
				     sel == 3)) ||	/* SRSMap */
				   (rd == MIPS_CP0_CONFIG &&
				    (sel == 6 ||	/* Config6 */
				     sel == 7)) ||	/* Config7 */
				   (rd == MIPS_CP0_LLADDR &&
				    (sel == 2) &&	/* MAARI */
				    cpu_guest_has_maar &&
				    !cpu_guest_has_dyn_maar) ||
				   (rd == MIPS_CP0_ERRCTL &&
				    (sel == 0))) {	/* ErrCtl */
				val = cop0->reg[rd][sel];
#ifdef CONFIG_CPU_LOONGSON64
			} else if (rd == MIPS_CP0_DIAG &&
				   (sel == 0)) {	/* Diag */
				val = cop0->reg[rd][sel];
#endif
			} else {
				val = 0;
				er = EMULATE_FAIL;
			}

			if (er != EMULATE_FAIL) {
				/* Sign extend */
				if (inst.c0r_format.rs == mfc_op)
					val = (int)val;
				vcpu->arch.gprs[rt] = val;
			}

			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
					KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
				      KVM_TRACE_COP0(rd, sel), val);
			break;

		case dmtc_op:
		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			val = vcpu->arch.gprs[rt];
			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
					KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
				      KVM_TRACE_COP0(rd, sel), val);

			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				kvm_vz_lose_htimer(vcpu);
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 0) {		/* LLAddr */
				/*
				 * P5600 generates GPSI on guest MTC0 LLAddr.
				 * Only allow the guest to clear LLB.
				 */
				if (cpu_guest_has_rw_llb &&
				    !(val & MIPS_LLADDR_LLB))
					write_gc0_lladdr(0);
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 1 &&		/* MAAR */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				val = mips_process_maar(inst.c0r_format.rs,
							val);

				/* MAARI must be in range */
				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
				       ARRAY_SIZE(vcpu->arch.maar));
				vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
									val;
			} else if (rd == MIPS_CP0_LLADDR &&
				   (sel == 2) &&	/* MAARI */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				kvm_write_maari(vcpu, val);
			} else if (rd == MIPS_CP0_CONFIG &&
				   (sel == 6)) {
				cop0->reg[rd][sel] = (int)val;
			} else if (rd == MIPS_CP0_ERRCTL &&
				   (sel == 0)) {	/* ErrCtl */
				/* ignore the written value */
#ifdef CONFIG_CPU_LOONGSON64
			} else if (rd == MIPS_CP0_DIAG &&
				   (sel == 0)) {	/* Diag */
				unsigned long flags;

				local_irq_save(flags);
				if (val & LOONGSON_DIAG_BTB) {
					/* Flush BTB */
					set_c0_diag(LOONGSON_DIAG_BTB);
				}
				if (val & LOONGSON_DIAG_ITLB) {
					/* Flush ITLB */
					set_c0_diag(LOONGSON_DIAG_ITLB);
				}
				if (val & LOONGSON_DIAG_DTLB) {
					/* Flush DTLB */
					set_c0_diag(LOONGSON_DIAG_DTLB);
				}
				if (val & LOONGSON_DIAG_VTLB) {
					/* Flush VTLB */
					kvm_loongson_clear_guest_vtlb();
				}
				if (val & LOONGSON_DIAG_FTLB) {
					/* Flush FTLB */
					kvm_loongson_clear_guest_ftlb();
				}
				local_irq_restore(flags);
#endif
			} else {
				er = EMULATE_FAIL;
			}
			break;

		default:
			er = EMULATE_FAIL;
			break;
		}
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		vcpu->arch.pc = curr_pc;
	}

	return er;
}

static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
					       u32 *opc, u32 cause,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 cache, op_inst, op, base;
	s16 offset;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va, curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = inst.i_format.rs;
	op_inst = inst.i_format.rt;
	if (cpu_has_mips_r6)
		offset = inst.spec3_format.simmediate;
	else
		offset = inst.i_format.simmediate;
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/* Secondary or tertiary cache ops ignored */
	if (cache != Cache_I && cache != Cache_D)
		return EMULATE_DONE;

	switch (op_inst) {
	case Index_Invalidate_I:
		flush_icache_line_indexed(va);
		return EMULATE_DONE;
	case Index_Writeback_Inv_D:
		flush_dcache_line_indexed(va);
		return EMULATE_DONE;
	case Hit_Invalidate_I:
	case Hit_Invalidate_D:
	case Hit_Writeback_Inv_D:
		if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
			/* We can just flush entire icache */
			local_flush_icache_range(0, 0);
			return EMULATE_DONE;
		}

		/* So far, other platforms support index cache ops */
		break;
	default:
		break;
	}

	kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
		offset);
	/* Rollback PC */
	vcpu->arch.pc = curr_pc;

	return EMULATE_FAIL;
}

#ifdef CONFIG_CPU_LOONGSON64
static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
					      u32 *opc, u32 cause,
					      struct kvm_vcpu *vcpu)
{
	unsigned int rs, rd;
	unsigned int hostcfg;
	unsigned long curr_pc;
	enum emulation_result er = EMULATE_DONE;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rs = inst.loongson3_lscsr_format.rs;
	rd = inst.loongson3_lscsr_format.rd;
	switch (inst.loongson3_lscsr_format.fr) {
	case 0x8:  /* Read CPUCFG */
		++vcpu->stat.vz_cpucfg_exits;
		hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);

		switch (vcpu->arch.gprs[rs]) {
		case LOONGSON_CFG0:
			vcpu->arch.gprs[rd] = 0x14c000;
			break;
		case LOONGSON_CFG1:
			hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
				    LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
				    LOONGSON_CFG1_SFBP);
			vcpu->arch.gprs[rd] = hostcfg;
			break;
		case LOONGSON_CFG2:
			hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
				    LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
			vcpu->arch.gprs[rd] = hostcfg;
			break;
		case LOONGSON_CFG3:
			vcpu->arch.gprs[rd] = hostcfg;
			break;
		default:
			/* Don't export any other advanced features to guest */
			vcpu->arch.gprs[rd] = 0;
			break;
		}
		break;

	default:
		kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
			inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
			curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr);

		vcpu->arch.pc = curr_pc;
	}

	return er;
}
#endif

static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	union mips_instruction inst;
	int rd, rt, sel;
	int err;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	switch (inst.r_format.opcode) {
	case cop0_op:
		er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu);
		break;
#ifndef CONFIG_CPU_MIPSR6
	case cache_op:
		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
		er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
		break;
#endif
#ifdef CONFIG_CPU_LOONGSON64
	case lwc2_op:
		er = kvm_vz_gpsi_lwc2(inst, opc, cause, vcpu);
		break;
#endif
	case spec3_op:
		switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
		case cache6_op:
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
			er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
			break;
#endif
		case rdhwr_op:
			if (inst.r_format.rs || (inst.r_format.re >> 3))
				goto unknown;

			rd = inst.r_format.rd;
			rt = inst.r_format.rt;
			sel = inst.r_format.re & 0x7;

			switch (rd) {
			case MIPS_HWR_CC:	/* Read count register */
				arch->gprs[rt] =
					(long)(int)kvm_mips_read_count(vcpu);
				break;
			default:
				trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
					      KVM_TRACE_HWR(rd, sel), 0);
				goto unknown;
			}

			trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
				      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);

			er = update_pc(vcpu, cause);
			break;
		default:
			goto unknown;
		}
		break;
unknown:

	default:
		kvm_err("GPSI exception not supported (%p/%#x)\n",
			opc, inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	union mips_instruction inst;
	int err;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/* complete MTC0 on behalf of guest and advance EPC */
	if (inst.c0r_format.opcode == cop0_op &&
	    inst.c0r_format.rs == mtc_op &&
	    inst.c0r_format.z == 0) {
		int rt = inst.c0r_format.rt;
		int rd = inst.c0r_format.rd;
		int sel = inst.c0r_format.sel;
		unsigned int val = arch->gprs[rt];
		unsigned int old_val, change;

		trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
			      val);

		if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
			/* FR bit should read as zero if no FPU */
			if (!kvm_mips_guest_has_fpu(&vcpu->arch))
				val &= ~(ST0_CU1 | ST0_FR);

			/*
			 * Also don't allow FR to be set if host doesn't support
			 * it.
			 */
			if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				val &= ~ST0_FR;

			old_val = read_gc0_status();
			change = val ^ old_val;

			if (change & ST0_FR) {
				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				kvm_drop_fpu(vcpu);
			}

			/*
			 * If MSA state is already live, it is undefined how it
			 * interacts with FR=0 FPU state, and we don't want to
			 * hit reserved instruction exceptions trying to save
			 * the MSA state later when CU=1 && FR=1, so play it
			 * safe and save it first.
			 */
			if (change & ST0_CU1 && !(val & ST0_FR) &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
				kvm_lose_fpu(vcpu);

			write_gc0_status(val);
		} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
			u32 old_cause = read_gc0_cause();
			u32 change = old_cause ^ val;

			/* DC bit enabling/disabling timer? */
			if (change & CAUSEF_DC) {
				if (val & CAUSEF_DC) {
					kvm_vz_lose_htimer(vcpu);
					kvm_mips_count_disable_cause(vcpu);
				} else {
					kvm_mips_count_enable_cause(vcpu);
				}
			}

			/* Only certain bits are RW to the guest */
			change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
				   CAUSEF_IP0 | CAUSEF_IP1);

			/* WP can only be cleared */
			change &= ~CAUSEF_WP | old_cause;

			write_gc0_cause(old_cause ^ change);
		} else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
			write_gc0_intctl(val);
		} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
			old_val = read_gc0_config5();
			change = val ^ old_val;

			/* Handle changes in FPU/MSA modes */
			preempt_disable();

			/*
			 * Propagate FRE changes immediately if the FPU
			 * context is already loaded.
			 */
			if (change & MIPS_CONF5_FRE &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
				change_c0_config5(MIPS_CONF5_FRE, val);

			preempt_enable();

			/* Only a few bits of Config5 are writable */
			val = old_val ^
				(change & kvm_vz_config5_guest_wrmask(vcpu));
			write_gc0_config5(val);
		} else {
			kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
				opc, inst.word);
			er = EMULATE_FAIL;
		}

		if (er != EMULATE_FAIL)
			er = update_pc(vcpu, cause);
	} else {
		kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
			opc, inst.word);
		er = EMULATE_FAIL;
	}

	return er;
}

static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	/*
	 * Presumably this is due to MC (guest mode change), so lets trace some
	 * relevant info.
	 */
	trace_kvm_guest_mode_change(vcpu);

	return EMULATE_DONE;
}

static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
						   struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	unsigned long curr_pc;
	int err;

	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	er = kvm_mips_emul_hypcall(vcpu, inst);
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}

static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
							       u32 cause,
							       u32 *opc,
							       struct kvm_vcpu *vcpu)
{
	u32 inst;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
		gexccode, opc, inst, read_gc0_status());

	return EMULATE_FAIL;
}

static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
			MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
	int ret = RESUME_GUEST;

	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
	switch (gexccode) {
	case MIPS_GCTL0_GEXC_GPSI:
		++vcpu->stat.vz_gpsi_exits;
		er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GSFC:
		++vcpu->stat.vz_gsfc_exits;
		er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_HC:
		++vcpu->stat.vz_hc_exits;
		er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GRR:
		++vcpu->stat.vz_grr_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GVA:
		++vcpu->stat.vz_gva_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GHFC:
		++vcpu->stat.vz_ghfc_exits;
		er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GPA:
		++vcpu->stat.vz_gpa_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	default:
		++vcpu->stat.vz_resvd_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_HYPERCALL) {
		ret = kvm_mips_handle_hypcall(vcpu);
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use a coprocessor which hasn't been
 * allowed by the root context.
 */
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_FAIL;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/*
		 * If guest FPU not present, the FPU operation should have been
		 * treated as a reserved instruction!
		 * If FPU already in use, we shouldn't get this at all.
		 */
		if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
			preempt_enable();
			return EMULATE_FAIL;
		}

		kvm_own_fpu(vcpu);
		er = EMULATE_DONE;
	}
	/* other coprocessors not handled */

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

/**
 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled in the root
 * context.
 */
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	/*
	 * If MSA not present or not exposed to guest or FR=0, the MSA operation
	 * should have been treated as a reserved instruction!
	 * Same if CU1=1, FR=0.
	 * If MSA already in use, we shouldn't get this at all.
	 */
	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
	    !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_msa(vcpu);

	return RESUME_GUEST;
}

static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err, ret = RESUME_GUEST;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
		/* A code fetch fault doesn't count as an MMIO */
		if (kvm_is_ifetch_fault(&vcpu->arch)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_load(inst, cause, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err;
	int ret = RESUME_GUEST;

	/* Just try the access again if we couldn't do the translation */
	if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
		return RESUME_GUEST;
	vcpu->arch.host_cp0_badvaddr = badvaddr;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_store(inst, cause, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

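/* Register indices reported to userland via KVM_GET_REG_LIST */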
static u64 kvm_vz_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_PAGEGRAIN,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG6,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXT,
#endif
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

static u64 kvm_vz_get_one_regs_contextconfig[] = {
	KVM_REG_MIPS_CP0_CONTEXTCONFIG,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
#endif
};

static u64 kvm_vz_get_one_regs_segments[] = {
	KVM_REG_MIPS_CP0_SEGCTL0,
	KVM_REG_MIPS_CP0_SEGCTL1,
	KVM_REG_MIPS_CP0_SEGCTL2,
};

static u64 kvm_vz_get_one_regs_htw[] = {
	KVM_REG_MIPS_CP0_PWBASE,
	KVM_REG_MIPS_CP0_PWFIELD,
	KVM_REG_MIPS_CP0_PWSIZE,
	KVM_REG_MIPS_CP0_PWCTL,
};

static u64 kvm_vz_get_one_regs_kscratch[] = {
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,
};

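/* Number of registers kvm_vz_copy_reg_indices() will report for this VCPU */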
static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_vz_get_one_regs);
	if (cpu_guest_has_userlocal)
		++ret;
	if (cpu_guest_has_badinstr)
		++ret;
	if (cpu_guest_has_badinstrp)
		++ret;
	if (cpu_guest_has_contextconfig)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
	if (cpu_guest_has_segments)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
	if (cpu_guest_has_htw || cpu_guest_has_ldpte)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
		ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
	ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);

	return ret;
}

static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_vz_get_one_regs,
			 sizeof(kvm_vz_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_vz_get_one_regs);

	if (cpu_guest_has_userlocal) {
		index = KVM_REG_MIPS_CP0_USERLOCAL;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstr) {
		index = KVM_REG_MIPS_CP0_BADINSTR;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstrp) {
		index = KVM_REG_MIPS_CP0_BADINSTRP;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_contextconfig) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
				 sizeof(kvm_vz_get_one_regs_contextconfig)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
	}
	if (cpu_guest_has_segments) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
				 sizeof(kvm_vz_get_one_regs_segments)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
	}
	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
				 sizeof(kvm_vz_get_one_regs_htw)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
	}
	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
			index = KVM_REG_MIPS_CP0_MAAR(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}

		index = KVM_REG_MIPS_CP0_MAARI;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	for (i = 0; i < 6; ++i) {
		if (!cpu_guest_has_kscr(i + 2))
			continue;

		if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
				 sizeof(kvm_vz_get_one_regs_kscratch[i])))
			return -EFAULT;
		++indices;
	}

	return 0;
}

static inline s64 entrylo_kvm_to_user(unsigned long v)
{
	s64 mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits up into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= ((s64)v & mask) << 32;
	}
	return ret;
}

static inline unsigned long entrylo_user_to_kvm(s64 v)
{
	unsigned long mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits down into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= (v >> 32) & mask;
	}
	return ret;
}

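/* Read one guest register for the KVM_GET_ONE_REG ioctl */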
static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)read_gc0_index();
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = entrylo_kvm_to_user(read_gc0_entrylo0());
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = entrylo_kvm_to_user(read_gc0_entrylo1());
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)read_gc0_context();
		break;
	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		*v = read_gc0_contextconfig();
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		*v = read_gc0_userlocal();
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		*v = read_gc0_xcontextconfig();
		break;
#endif
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)read_gc0_pagemask();
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		*v = (long)read_gc0_pagegrain();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL0:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl0();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL1:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl1();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL2:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl2();
		break;
	case KVM_REG_MIPS_CP0_PWBASE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwbase();
		break;
	case KVM_REG_MIPS_CP0_PWFIELD:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwfield();
		break;
	case KVM_REG_MIPS_CP0_PWSIZE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwsize();
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)read_gc0_wired();
		break;
	case KVM_REG_MIPS_CP0_PWCTL:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwctl();
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)read_gc0_hwrena();
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)read_gc0_badvaddr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		*v = read_gc0_badinstr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		*v = read_gc0_badinstrp();
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)read_gc0_entryhi();
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)read_gc0_compare();
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)read_gc0_status();
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		*v = read_gc0_intctl();
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)read_gc0_cause();
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)read_gc0_epc();
		break;
	case KVM_REG_MIPS_CP0_PRID:
		switch (boot_cpu_type()) {
		case CPU_CAVIUM_OCTEON3:
			/* Octeon III has a read-only guest.PRid */
			*v = read_gc0_prid();
			break;
		default:
			*v = (long)kvm_read_c0_guest_prid(cop0);
			break;
		}
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = kvm_vz_read_gc0_ebase();
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = read_gc0_config();
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			return -EINVAL;
		*v = read_gc0_config1();
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			return -EINVAL;
		*v = read_gc0_config2();
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			return -EINVAL;
		*v = read_gc0_config3();
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			return -EINVAL;
		*v = read_gc0_config4();
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			return -EINVAL;
		*v = read_gc0_config5();
		break;
	case KVM_REG_MIPS_CP0_CONFIG6:
		*v = kvm_read_sw_gc0_config6(cop0);
		break;
	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
			return -EINVAL;
		*v = vcpu->arch.maar[idx];
		break;
	case KVM_REG_MIPS_CP0_MAARI:
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		*v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		*v = read_gc0_xcontext();
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)read_gc0_errorepc();
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			*v = (long)read_gc0_kscratch1();
			break;
		case 3:
			*v = (long)read_gc0_kscratch2();
			break;
		case 4:
			*v = (long)read_gc0_kscratch3();
			break;
		case 5:
			*v = (long)read_gc0_kscratch4();
			break;
		case 6:
			*v = (long)read_gc0_kscratch5();
			break;
		case 7:
			*v = (long)read_gc0_kscratch6();
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

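/* Write one guest register for the KVM_SET_ONE_REG ioctl */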
static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		write_gc0_index(v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		write_gc0_entrylo0(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		write_gc0_entrylo1(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		write_gc0_context(v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		write_gc0_contextconfig(v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		write_gc0_userlocal(v);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		write_gc0_xcontextconfig(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_PAGEMASK:
		write_gc0_pagemask(v);
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		write_gc0_pagegrain(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL0:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl0(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL1:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl1(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL2:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl2(v);
		break;
	case KVM_REG_MIPS_CP0_PWBASE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwbase(v);
		break;
	case KVM_REG_MIPS_CP0_PWFIELD:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwfield(v);
		break;
	case KVM_REG_MIPS_CP0_PWSIZE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwsize(v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		change_gc0_wired(MIPSR6_WIRED_WIRED, v);
		break;
	case KVM_REG_MIPS_CP0_PWCTL:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwctl(v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		write_gc0_hwrena(v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		write_gc0_badvaddr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		write_gc0_badinstr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		write_gc0_badinstrp(v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		write_gc0_entryhi(v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		write_gc0_status(v);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		write_gc0_intctl(v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the timer interrupt pending bit (TI).
		 * A timer interrupt should not happen in between.
		 */
		if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				change_gc0_cause((u32)~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				change_gc0_cause((u32)~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			write_gc0_cause(v);
		}
		break;
	case KVM_REG_MIPS_CP0_EPC:
		write_gc0_epc(v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		switch (boot_cpu_type()) {
		case CPU_CAVIUM_OCTEON3:
			/* Octeon III has a guest.PRid, but its read-only */
			break;
		default:
			kvm_write_c0_guest_prid(cop0, v);
			break;
		}
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		kvm_vz_write_gc0_ebase(v);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		cur = read_gc0_config();
		change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			break;
		cur = read_gc0_config1();
		change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config1(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			break;
		cur = read_gc0_config2();
		change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config2(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			break;
		cur = read_gc0_config3();
		change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config3(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			break;
		cur = read_gc0_config4();
		change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config4(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			break;
		cur = read_gc0_config5();
		change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config5(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG6:
		cur = kvm_read_sw_gc0_config6(cop0);
		change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_sw_gc0_config6(cop0, (int)v);
		}
		break;
	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
			return -EINVAL;
		vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
		break;
	case KVM_REG_MIPS_CP0_MAARI:
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		kvm_write_maari(vcpu, v);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		write_gc0_xcontext(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		write_gc0_errorepc(v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			write_gc0_kscratch1(v);
			break;
		case 3:
			write_gc0_kscratch2(v);
			break;
		case 4:
			write_gc0_kscratch3(v);
			break;
		case 5:
			write_gc0_kscratch4(v);
			break;
		case 6:
			write_gc0_kscratch5(v);
			break;
		case 7:
			write_gc0_kscratch6(v);
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

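/*
 * GuestIDs are allocated per physical CPU from a rolling counter; the bits
 * above GUESTID_MASK act as a version number used to detect stale IDs.
 */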
#define guestid_cache(cpu)	(cpu_data[cpu].guestid_cache)
static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
{
	unsigned long guestid = guestid_cache(cpu);

	if (!(++guestid & GUESTID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		if (!guestid)		/* fix version if needed */
			guestid = GUESTID_FIRST_VERSION;

		++guestid;	/* guestid 0 reserved for root */

		/* start new guestid cycle */
		kvm_vz_local_flush_roottlb_all_guests();
		kvm_vz_local_flush_guesttlb_all();
	}

	guestid_cache(cpu) = guestid;
}

/* Returns 1 if the guest TLB may be clobbered */
static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
{
	int ret = 0;
	int i;

	if (!kvm_request_pending(vcpu))
		return 0;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		if (cpu_has_guestid) {
			/* Drop all GuestIDs for this VCPU */
			for_each_possible_cpu(i)
				vcpu->arch.vzguestid[i] = 0;
			/* This will clobber guest TLB contents too */
			ret = 1;
		}
		/*
		 * For Root ASID Dealias (RAD) we don't do anything here, but we
		 * still need the request to ensure we recheck asid_flush_mask.
		 * We can still return 0 as only the root TLB will be affected
		 * by a root ASID flush.
		 */
	}

	return ret;
}

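/* Preserve the guest's wired TLB entries while the VCPU is descheduled */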
static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
{
	unsigned int wired = read_gc0_wired();
	struct kvm_mips_tlb *tlbs;
	int i;

	/* Expand the wired TLB array if necessary */
	wired &= MIPSR6_WIRED_WIRED;
	if (wired > vcpu->arch.wired_tlb_limit) {
		tlbs = krealloc(vcpu->arch.wired_tlb, wired *
				sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
		if (WARN_ON(!tlbs)) {
			/* Save whatever we can */
			wired = vcpu->arch.wired_tlb_limit;
		} else {
			vcpu->arch.wired_tlb = tlbs;
			vcpu->arch.wired_tlb_limit = wired;
		}
	}

	if (wired)
		/* Save wired entries from the guest TLB */
		kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);

	/* Invalidate any dropped entries since last time */
	for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
		vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
		vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
		vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
		vcpu->arch.wired_tlb[i].tlb_mask = 0;
	}
	vcpu->arch.wired_tlb_used = wired;
}

static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
{
	/* Load wired entries into the guest TLB */
	if (vcpu->arch.wired_tlb)
		kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
				     vcpu->arch.wired_tlb_used);
}
2497
static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
	bool migrated;

	/*
	 * Are we entering guest context on a different CPU to last time?
	 * If so, the VCPU's guest TLB state on this CPU may be stale.
	 */
	migrated = (vcpu->arch.last_exec_cpu != cpu);
	vcpu->arch.last_exec_cpu = cpu;

	/*
	 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
	 * remains set until another vcpu is loaded in. As a rule GuestRID
	 * remains zeroed when in root context unless the kernel is busy
	 * manipulating guest tlb entries.
	 */
	if (cpu_has_guestid) {
		/*
		 * Check if our GuestID is of an older version and thus invalid.
		 *
		 * We also discard the stored GuestID if we've executed on
		 * another CPU, as the guest mappings may have changed without
		 * hypervisor knowledge.
		 */
		if (migrated ||
		    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
					GUESTID_VERSION_MASK) {
			kvm_vz_get_new_guestid(cpu, vcpu);
			vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
			trace_kvm_guestid_change(vcpu,
						 vcpu->arch.vzguestid[cpu]);
		}

		/* Restore GuestID */
		change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
	} else {
		/*
		 * The Guest TLB only stores a single guest's TLB state, so
		 * flush it if another VCPU has executed on this CPU.
		 *
		 * We also flush if we've executed on another CPU, as the guest
		 * mappings may have changed without hypervisor knowledge.
		 */
		if (migrated || last_exec_vcpu[cpu] != vcpu)
			kvm_vz_local_flush_guesttlb_all();
		last_exec_vcpu[cpu] = vcpu;

		/*
		 * Root ASID dealiases guest GPA mappings in the root TLB.
		 * Allocate new root ASID if needed.
		 */
		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
			get_new_mmu_context(gpa_mm);
		else
			check_mmu_context(gpa_mm);
	}
}
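
/*
 * Restore guest CP0 context when the VCPU is scheduled in, skipping the full
 * register restore when this VCPU was already the last one resident on this
 * CPU.
 */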
static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	bool migrated, all;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last VCPU to run on this CPU?
	 * If not, any old guest state from this VCPU will have been clobbered.
	 */
	all = migrated || (last_vcpu[cpu] != vcpu);
	last_vcpu[cpu] = vcpu;

	/*
	 * Restore CP0_Wired unconditionally as we clear it after use, and
	 * restore wired guest TLB entries (while in guest context).
	 */
	kvm_restore_gc0_wired(cop0);
	if (current->flags & PF_VCPU) {
		tlbw_use_hazard();
		kvm_vz_vcpu_load_tlb(vcpu, cpu);
		kvm_vz_vcpu_load_wired(vcpu);
	}

	/*
	 * Restore timer state regardless, as e.g. Cause.TI can change over
	 * time if left unmaintained.
	 */
	kvm_vz_restore_timer(vcpu);

	/* Set MC bit if we want to trace guest mode changes */
	if (kvm_trace_guest_mode_change)
		set_c0_guestctl0(MIPS_GCTL0_MC);
	else
		clear_c0_guestctl0(MIPS_GCTL0_MC);

	/* Don't bother restoring registers multiple times unless necessary */
	if (!all)
		return 0;

	/*
	 * Restore config registers first, as some implementations restrict
	 * writes to other registers when the corresponding feature bits aren't
	 * set. For example Status.CU1 cannot be set unless Config1.FP is set.
	 */
	kvm_restore_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_restore_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_restore_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_restore_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_restore_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_restore_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_restore_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_restore_gc0_config7(cop0);

	kvm_restore_gc0_index(cop0);
	kvm_restore_gc0_entrylo0(cop0);
	kvm_restore_gc0_entrylo1(cop0);
	kvm_restore_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_restore_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_xcontextconfig(cop0);
#endif
	kvm_restore_gc0_pagemask(cop0);
	kvm_restore_gc0_pagegrain(cop0);
	kvm_restore_gc0_hwrena(cop0);
	kvm_restore_gc0_badvaddr(cop0);
	kvm_restore_gc0_entryhi(cop0);
	kvm_restore_gc0_status(cop0);
	kvm_restore_gc0_intctl(cop0);
	kvm_restore_gc0_epc(cop0);
	kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
	if (cpu_guest_has_userlocal)
		kvm_restore_gc0_userlocal(cop0);

	kvm_restore_gc0_errorepc(cop0);

	/* restore KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_restore_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_restore_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_restore_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_restore_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_restore_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_restore_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_restore_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_restore_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_restore_gc0_segctl0(cop0);
		kvm_restore_gc0_segctl1(cop0);
		kvm_restore_gc0_segctl2(cop0);
	}

	/* restore HTW registers */
	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
		kvm_restore_gc0_pwbase(cop0);
		kvm_restore_gc0_pwfield(cop0);
		kvm_restore_gc0_pwsize(cop0);
		kvm_restore_gc0_pwctl(cop0);
	}

	/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		write_c0_guestctl2(
			cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);

	/*
	 * We should clear linked load bit to break interrupted atomics. This
	 * prevents a SC on the next VCPU from succeeding by matching a LL on
	 * the previous VCPU.
	 */
	if (vcpu->kvm->created_vcpus > 1)
		write_gc0_lladdr(0);

	return 0;
}
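
/*
 * Save guest CP0 context to memory when the VCPU is scheduled out, so the
 * hardware guest context can be reused by another VCPU.
 */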
static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if (current->flags & PF_VCPU)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_lose_fpu(vcpu);

	kvm_save_gc0_index(cop0);
	kvm_save_gc0_entrylo0(cop0);
	kvm_save_gc0_entrylo1(cop0);
	kvm_save_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_save_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_xcontextconfig(cop0);
#endif
	kvm_save_gc0_pagemask(cop0);
	kvm_save_gc0_pagegrain(cop0);
	kvm_save_gc0_wired(cop0);
	/* allow wired TLB entries to be overwritten */
	clear_gc0_wired(MIPSR6_WIRED_WIRED);
	kvm_save_gc0_hwrena(cop0);
	kvm_save_gc0_badvaddr(cop0);
	kvm_save_gc0_entryhi(cop0);
	kvm_save_gc0_status(cop0);
	kvm_save_gc0_intctl(cop0);
	kvm_save_gc0_epc(cop0);
	kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
	if (cpu_guest_has_userlocal)
		kvm_save_gc0_userlocal(cop0);

	/* only save implemented config registers */
	kvm_save_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_save_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_save_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_save_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_save_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_save_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_save_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_save_gc0_config7(cop0);

	kvm_save_gc0_errorepc(cop0);

	/* save KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_save_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_save_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_save_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_save_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_save_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_save_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_save_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_save_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_save_gc0_segctl0(cop0);
		kvm_save_gc0_segctl1(cop0);
		kvm_save_gc0_segctl2(cop0);
	}

	/* save HTW registers if enabled in guest */
	if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
	    kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
		kvm_save_gc0_pwbase(cop0);
		kvm_save_gc0_pwfield(cop0);
		kvm_save_gc0_pwsize(cop0);
		kvm_save_gc0_pwctl(cop0);
	}

	kvm_vz_save_timer(vcpu);

	/* save Root.GuestCtl2 in unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
			read_c0_guestctl2();

	return 0;
}

/**
 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
 * @size:	Number of guest VTLB entries (0 < @size <= root VTLB entries).
 *
 * Attempt to resize the guest VTLB by writing guest Config registers. This is
 * necessary for cores with a shared root/guest TLB to avoid overlap with wired
 * entries in the root VTLB.
 *
 * Returns:	The resulting guest VTLB size.
 */
static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
{
	unsigned int config4 = 0, ret = 0, limit;

	/* Write MMUSize - 1 into guest Config registers */
	if (cpu_guest_has_conf1)
		change_gc0_config1(MIPS_CONF1_TLBS,
				   (size - 1) << MIPS_CONF1_TLBS_SHIFT);
	if (cpu_guest_has_conf4) {
		config4 = read_gc0_config4();
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
			config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_VTLBSIZEEXT_SHIFT;
		} else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			   MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
			config4 &= ~MIPS_CONF4_MMUSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_MMUSIZEEXT_SHIFT;
		}
		write_gc0_config4(config4);
	}

	/*
	 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it
	 * would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so write
	 * not dropped).
	 */
	if (cpu_has_mips_r6) {
		limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
						MIPSR6_WIRED_LIMIT_SHIFT;
		if (size - 1 <= limit)
			limit = 0;
		write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
	}

	/* Read back MMUSize - 1 */
	back_to_back_c0_hazard();
	if (cpu_guest_has_conf1)
		ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
						MIPS_CONF1_TLBS_SHIFT;
	if (config4) {
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
				MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
		else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
				MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
	}
	return ret + 1;
}
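
/*
 * Per-CPU initialisation of VZ state: partition or resize the TLB between
 * root and guest, and grant the guest direct access to coprocessor 0, the
 * MMU, CACHE operations and the Config registers via GuestCtl0.
 */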
static int kvm_vz_hardware_enable(void)
{
	unsigned int mmu_size, guest_mmu_size, ftlb_size;
	u64 guest_cvmctl, cvmvmconfig;

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Set up guest timer/perfcount IRQ lines */
		guest_cvmctl = read_gc0_cvmctl();
		guest_cvmctl &= ~CVMCTL_IPTI;
		guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT;
		guest_cvmctl &= ~CVMCTL_IPPCI;
		guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT;
		write_gc0_cvmctl(guest_cvmctl);

		cvmvmconfig = read_c0_cvmvmconfig();
		/* No I/O hole translation */
		cvmvmconfig |= CVMVMCONF_DGHT;
		/* Halve the root MMU size, giving the other half to the guest */
		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
		guest_mmu_size = mmu_size / 2;
		mmu_size -= guest_mmu_size;
		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
		cvmvmconfig |= mmu_size - 1;
		write_c0_cvmvmconfig(cvmvmconfig);

		/* Update our records */
		current_cpu_data.tlbsize = mmu_size;
		current_cpu_data.tlbsizevtlb = mmu_size;
		current_cpu_data.guest.tlbsize = guest_mmu_size;

		/* Flush moved entries in new (guest) context */
		kvm_vz_local_flush_guesttlb_all();
		break;
	default:
		/*
		 * ImgTec cores tend to use a shared root/guest TLB. To avoid
		 * overlap of root wired and guest entries, the guest TLB may
		 * need resizing.
		 */
		mmu_size = current_cpu_data.tlbsizevtlb;
		ftlb_size = current_cpu_data.tlbsize - mmu_size;

		/* Try switching to maximum guest VTLB size for flush */
		guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
		kvm_vz_local_flush_guesttlb_all();

		/*
		 * Reduce to make space for root wired entries and at least 2
		 * root non-wired entries. This does assume that long-term wired
		 * entries won't be added later.
		 */
		guest_mmu_size = mmu_size - num_wired_entries() - 2;
		guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;

		/*
		 * Write the VTLB size, but if another CPU has already written
		 * it, check it matches or we won't provide a consistent view
		 * to the guest. If this ever happens it suggests an asymmetric
		 * number of wired entries.
		 */
		if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
		    WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
			 "Available guest VTLB size mismatch"))
			return -EINVAL;
		break;
	}

	/*
	 * Enable virtualization features granting guest direct control of
	 * certain features:
	 * CP0=1:	Guest coprocessor 0 context.
	 * AT=Guest:	Guest MMU.
	 * CG=1:	Hit (virtual address) CACHE operations (optional).
	 * CF=1:	Guest Config registers.
	 * CGI=1:	Indexed flush CACHE operations (optional).
	 */
	write_c0_guestctl0(MIPS_GCTL0_CP0 |
			   (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
			   MIPS_GCTL0_CG | MIPS_GCTL0_CF);
	if (cpu_has_guestctl0ext) {
		if (current_cpu_type() != CPU_LOONGSON64)
			set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
		else
			clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
	}

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();

		GUESTID_MASK = current_cpu_data.guestid_mask;
		GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
		GUESTID_VERSION_MASK = ~GUESTID_MASK;

		current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
	}

	/* clear any pending injected virtual guest interrupts */
	if (cpu_has_guestctl2)
		clear_c0_guestctl2(0x3f << 10);

#ifdef CONFIG_CPU_LOONGSON64
	/* Control guest CCA attribute */
	if (cpu_has_csr())
		csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
#endif

	return 0;
}
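
/*
 * Per-CPU teardown of VZ state: flush remaining guest TLB entries and, on
 * Octeon III, return the whole TLB to root ownership.
 */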
static void kvm_vz_hardware_disable(void)
{
	u64 cvmvmconfig;
	unsigned int mmu_size;

	/* Flush any remaining guest TLB entries */
	kvm_vz_local_flush_guesttlb_all();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/*
		 * Allocate whole TLB for root. Existing guest TLB entries will
		 * change ownership to the root TLB. We should be safe though as
		 * they've already been flushed above while in guest TLB.
		 */
		cvmvmconfig = read_c0_cvmvmconfig();
		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
		cvmvmconfig |= mmu_size - 1;
		write_c0_cvmvmconfig(cvmvmconfig);

		/* Update our records */
		current_cpu_data.tlbsize = mmu_size;
		current_cpu_data.tlbsizevtlb = mmu_size;
		current_cpu_data.guest.tlbsize = 0;

		/* Flush moved entries in new (root) context */
		local_flush_tlb_all();
		break;
	}

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();
	}
}

static int kvm_vz_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_VZ:
		/* we wouldn't be here unless cpu_has_vz */
		r = 1;
		break;
#ifdef CONFIG_64BIT
	case KVM_CAP_MIPS_64BIT:
		/* We support 64bit registers/operations and addresses */
		r = 2;
		break;
#endif
	case KVM_CAP_IOEVENTFD:
		r = 1;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}
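
/* Start the VCPU with no valid GuestID on any CPU */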
static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i)
		vcpu->arch.vzguestid[i] = 0;

	return 0;
}

static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int cpu;

	/*
	 * If the VCPU is freed and reused as another VCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu[] or
	 * last_exec_vcpu[].
	 */
	for_each_possible_cpu(cpu) {
		if (last_vcpu[cpu] == vcpu)
			last_vcpu[cpu] = NULL;
		if (last_exec_vcpu[cpu] == vcpu)
			last_exec_vcpu[cpu] = NULL;
	}
}
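
/*
 * One-time initialisation of the VCPU's guest CP0 context to a valid
 * architectural reset state, including the Config register hierarchy.
 */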
static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long count_hz = 100*1000*1000; /* default to 100MHz */

	/*
	 * Start off the timer at the same frequency as the host timer, but the
	 * soft timer doesn't handle frequencies greater than 1GHz yet.
	 */
	if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
		count_hz = mips_hpt_frequency;
	kvm_mips_init_count(vcpu, count_hz);

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */

	/* PageGrain */
	if (cpu_has_mips_r5 || cpu_has_mips_r6)
		kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
	/* Wired */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_wired(cop0,
				       read_gc0_wired() & MIPSR6_WIRED_LIMIT);
	/* Status */
	kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
	if (cpu_has_mips_r5 || cpu_has_mips_r6)
		kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
	/* IntCtl */
	kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
				(INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
	/* PRId */
	kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
	/* EBase */
	kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);

	/* Config */
	kvm_save_gc0_config(cop0);
	/* architecturally writable (e.g. from guest) */
	kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
				 _page_cachable_default >> _CACHE_SHIFT);
	/* architecturally read only, but maybe writable from root */
	kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
	if (cpu_guest_has_conf1) {
		kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
		/* Config1 */
		kvm_save_gc0_config1(cop0);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2	|
					       MIPS_CONF1_MD	|
					       MIPS_CONF1_PC	|
					       MIPS_CONF1_WR	|
					       MIPS_CONF1_CA	|
					       MIPS_CONF1_FP);
	}
	if (cpu_guest_has_conf2) {
		kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
		/* Config2 */
		kvm_save_gc0_config2(cop0);
	}
	if (cpu_guest_has_conf3) {
		kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
		/* Config3 */
		kvm_save_gc0_config3(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA	|
					       MIPS_CONF3_BPG	|
					       MIPS_CONF3_ULRI	|
					       MIPS_CONF3_DSP	|
					       MIPS_CONF3_CTXTC	|
					       MIPS_CONF3_ITL	|
					       MIPS_CONF3_LPA	|
					       MIPS_CONF3_VEIC	|
					       MIPS_CONF3_VINT	|
					       MIPS_CONF3_SP	|
					       MIPS_CONF3_CDMM	|
					       MIPS_CONF3_MT	|
					       MIPS_CONF3_SM	|
					       MIPS_CONF3_TL);
	}
	if (cpu_guest_has_conf4) {
		kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
		/* Config4 */
		kvm_save_gc0_config4(cop0);
	}
	if (cpu_guest_has_conf5) {
		kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
		/* Config5 */
		kvm_save_gc0_config5(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K	|
					       MIPS_CONF5_CV	|
					       MIPS_CONF5_MSAEN	|
					       MIPS_CONF5_UFE	|
					       MIPS_CONF5_FRE	|
					       MIPS_CONF5_SBRI	|
					       MIPS_CONF5_UFR);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
	}

	if (cpu_guest_has_contextconfig) {
		/* ContextConfig */
		kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
#ifdef CONFIG_64BIT
		/* XContextConfig */
		/* bits SEGBITS-13+3:4 set */
		kvm_write_sw_gc0_xcontextconfig(cop0,
					((1ull << (cpu_vmbits - 13)) - 1) << 4);
#endif
	}

	/* Implementation dependent, use the legacy layout */
	if (cpu_guest_has_segments) {
		/* SegCtl0, SegCtl1, SegCtl2 */
		kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
		kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
				(_page_cachable_default >> _CACHE_SHIFT) <<
						(16 + MIPS_SEGCFG_C_SHIFT));
		kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
	}

	/* reset HTW registers */
	if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) {
		/* PWField */
		kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
		/* PWSize */
		kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
	}

	/* start with no pending virtual guest interrupts */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;

	/* Put PC at reset vector */
	vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);

	return 0;
}

static void kvm_vz_flush_shadow_all(struct kvm *kvm)
{
	if (cpu_has_guestid) {
		/* Flush GuestID for each VCPU individually */
		kvm_flush_remote_tlbs(kvm);
	} else {
		/*
		 * For each CPU there is a single GPA ASID used by all VCPUs in
		 * the VM, so it doesn't make sense for the VCPUs to handle
		 * invalidation of these ASIDs individually.
		 *
		 * Instead mark all CPUs as needing ASID invalidation in
		 * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
		 * kick any running VCPUs so they check asid_flush_mask.
		 */
		cpumask_setall(&kvm->arch.asid_flush_mask);
		kvm_flush_remote_tlbs(kvm);
	}
}

static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_vz_flush_shadow_all(kvm);
}
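
/*
 * Called before re-entering the guest: if pending requests may clobber the
 * guest TLB, preserve the wired entries across the TLB state reload.
 */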
static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int preserve_guest_tlb;

	preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_vz_vcpu_load_tlb(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_load_wired(vcpu);
}

static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	kvm_vz_acquire_htimer(vcpu);
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());

	kvm_vz_check_requests(vcpu, cpu);
	kvm_vz_vcpu_load_tlb(vcpu, cpu);
	kvm_vz_vcpu_load_wired(vcpu);

	r = vcpu->arch.vcpu_run(vcpu->run, vcpu);

	kvm_vz_vcpu_save_wired(vcpu);

	return r;
}
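
/* The VZ implementation of the MIPS KVM callbacks */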
static struct kvm_mips_callbacks kvm_vz_callbacks = {
	.handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
	.handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
	.handle_addr_err_st = kvm_trap_vz_no_handler,
	.handle_addr_err_ld = kvm_trap_vz_no_handler,
	.handle_syscall = kvm_trap_vz_no_handler,
	.handle_res_inst = kvm_trap_vz_no_handler,
	.handle_break = kvm_trap_vz_no_handler,
	.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
	.handle_guest_exit = kvm_trap_vz_handle_guest_exit,

	.hardware_enable = kvm_vz_hardware_enable,
	.hardware_disable = kvm_vz_hardware_disable,
	.check_extension = kvm_vz_check_extension,
	.vcpu_init = kvm_vz_vcpu_init,
	.vcpu_uninit = kvm_vz_vcpu_uninit,
	.vcpu_setup = kvm_vz_vcpu_setup,
	.flush_shadow_all = kvm_vz_flush_shadow_all,
	.flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
	.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
	.queue_timer_int = kvm_vz_queue_timer_int_cb,
	.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
	.queue_io_int = kvm_vz_queue_io_int_cb,
	.dequeue_io_int = kvm_vz_dequeue_io_int_cb,
	.irq_deliver = kvm_vz_irq_deliver_cb,
	.irq_clear = kvm_vz_irq_clear_cb,
	.num_regs = kvm_vz_num_regs,
	.copy_reg_indices = kvm_vz_copy_reg_indices,
	.get_one_reg = kvm_vz_get_one_reg,
	.set_one_reg = kvm_vz_set_one_reg,
	.vcpu_load = kvm_vz_vcpu_load,
	.vcpu_put = kvm_vz_vcpu_put,
	.vcpu_run = kvm_vz_vcpu_run,
	.vcpu_reenter = kvm_vz_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	if (!cpu_has_vz)
		return -ENODEV;

	/*
	 * VZ requires at least 2 KScratch registers, so it should have been
	 * possible to allocate pgd_reg.
	 */
	if (WARN(pgd_reg == -1,
		 "pgd_reg not allocated even though cpu_has_vz\n"))
		return -ENODEV;

	pr_info("Starting KVM with MIPS VZ extensions\n");

	*install_callbacks = &kvm_vz_callbacks;
	return 0;
}