/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
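
/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
 * for which pages need to be cached.
 */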
#if defined(__PAGETABLE_PMD_FOLDED)
#define KVM_MMU_CACHE_MIN_PAGES 1
#else
#define KVM_MMU_CACHE_MIN_PAGES 2
#endif

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
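
/**
 * kvm_pgd_init() - Initialise KVM GPA page directory.
 * @page:	Pointer to page directory (PGD) for KVM GPA.
 *
 * Initialise a KVM GPA page directory with pointers to the invalid table,
 * i.e. representing no mappings. Unlike pgd_init(), this initialises all the
 * page directory pointers, not just those corresponding to the userland
 * address space, since it is for the guest physical address space rather
 * than a virtual address space.
 */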
static void kvm_pgd_init(void *page)
{
	unsigned long *p, *end;
	unsigned long entry;

#ifdef __PAGETABLE_PMD_FOLDED
	entry = (unsigned long)invalid_pte_table;
#else
	entry = (unsigned long)invalid_pmd_table;
#endif

	p = (unsigned long *)page;
	end = p + PTRS_PER_PGD;

	/* Manually unrolled loop: fill eight PGD entries per iteration */
	do {
		p[0] = entry;
		p[1] = entry;
		p[2] = entry;
		p[3] = entry;
		p[4] = entry;
		p += 8;
		p[-3] = entry;
		p[-2] = entry;
		p[-1] = entry;
	} while (p != end);
}
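
/**
 * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
 *
 * Allocate a blank KVM GPA page directory (PGD) for representing guest
 * physical address mappings.
 *
 * Returns:	Pointer to new KVM GPA page directory.
 *		NULL on allocation failure.
 */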
pgd_t *kvm_pgd_alloc(void)
{
	pgd_t *ret;

	ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret)
		kvm_pgd_init(ret);

	return ret;
}
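
/**
 * kvm_mips_walk_pgd() - Walk page table with optional allocation.
 * @pgd:	Page directory pointer.
 * @cache:	MMU page cache to allocate new page tables from, or NULL.
 * @addr:	Address to index page table using.
 *
 * Walk the page tables pointed to by @pgd to find the PTE corresponding to
 * the address @addr. If the intermediate page tables don't exist and @cache
 * is not NULL, they are allocated from @cache and populated with pointers to
 * the invalid tables.
 *
 * Returns:	Pointer to pte_t corresponding to @addr.
 *		NULL if a page table doesn't exist for @addr and !@cache.
 *		NULL if a page table allocation failed.
 */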
static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
				unsigned long addr)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd += pgd_index(addr);
	if (pgd_none(*pgd)) {
		/* Not used on MIPS yet */
		BUG();
		return NULL;
	}
	p4d = p4d_offset(pgd, addr);
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		pmd_t *new_pmd;

		if (!cache)
			return NULL;
		new_pmd = kvm_mmu_memory_cache_alloc(cache);
		pmd_init((unsigned long)new_pmd,
			 (unsigned long)invalid_pte_table);
		pud_populate(NULL, pud, new_pmd);
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		pte_t *new_pte;

		if (!cache)
			return NULL;
		new_pte = kvm_mmu_memory_cache_alloc(cache);
		clear_page(new_pte);
		pmd_populate_kernel(NULL, pmd, new_pte);
	}
	return pte_offset_kernel(pmd, addr);
}

/* Caller must hold kvm->mmu_lock */
static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
				   struct kvm_mmu_memory_cache *cache,
				   unsigned long addr)
{
	return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
}
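
/*
 * kvm_mips_flush_gpa_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest physical address space from the VM's GPA page tables.
 */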
static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	int i_min = pte_index(start_gpa);
	int i_max = pte_index(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
	int i;

	for (i = i_min; i <= i_max; ++i) {
		if (!pte_present(pte[i]))
			continue;

		set_pte(pte + i, __pte(0));
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	pte_t *pte;
	unsigned long end = ~0ul;
	int i_min = pmd_index(start_gpa);
	int i_max = pmd_index(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
		if (!pmd_present(pmd[i]))
			continue;

		pte = pte_offset_kernel(pmd + i, 0);
		if (i == i_max)
			end = end_gpa;

		if (kvm_mips_flush_gpa_pte(pte, start_gpa, end)) {
			pmd_clear(pmd + i);
			pte_free_kernel(NULL, pte);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	pmd_t *pmd;
	unsigned long end = ~0ul;
	int i_min = pud_index(start_gpa);
	int i_max = pud_index(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
		if (!pud_present(pud[i]))
			continue;

		pmd = pmd_offset(pud + i, 0);
		if (i == i_max)
			end = end_gpa;

		if (kvm_mips_flush_gpa_pmd(pmd, start_gpa, end)) {
			pud_clear(pud + i);
			pmd_free(NULL, pmd);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	p4d_t *p4d;
	pud_t *pud;
	unsigned long end = ~0ul;
	int i_min = pgd_index(start_gpa);
	int i_max = pgd_index(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
		if (!pgd_present(pgd[i]))
			continue;

		p4d = p4d_offset(pgd, 0);
		pud = pud_offset(p4d + i, 0);
		if (i == i_max)
			end = end_gpa;

		if (kvm_mips_flush_gpa_pud(pud, start_gpa, end)) {
			pgd_clear(pgd + i);
			pud_free(NULL, pud);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}
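
/**
 * kvm_mips_flush_gpa_pt() - Flush a range of guest physical addresses.
 * @kvm:	KVM pointer.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Flushes a range of GPA mappings from the GPA page tables.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:	Whether it's safe to remove the top level page directory
 *		because all lower levels have been removed.
 */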
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
	return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd,
				      start_gfn << PAGE_SHIFT,
				      end_gfn << PAGE_SHIFT);
}

#define BUILD_PTE_RANGE_OP(name, op)					\
static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start,	\
				 unsigned long end)			\
{									\
	int ret = 0;							\
	int i_min = pte_index(start);					\
	int i_max = pte_index(end);					\
	int i;								\
	pte_t old, new;							\
									\
	for (i = i_min; i <= i_max; ++i) {				\
		if (!pte_present(pte[i]))				\
			continue;					\
									\
		old = pte[i];						\
		new = op(old);						\
		if (pte_val(new) == pte_val(old))			\
			continue;					\
		set_pte(pte + i, new);					\
		ret = 1;						\
	}								\
	return ret;							\
}									\
									\
/* returns true if anything was done */					\
static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start,	\
				 unsigned long end)			\
{									\
	int ret = 0;							\
	pte_t *pte;							\
	unsigned long cur_end = ~0ul;					\
	int i_min = pmd_index(start);					\
	int i_max = pmd_index(end);					\
	int i;								\
									\
	for (i = i_min; i <= i_max; ++i, start = 0) {			\
		if (!pmd_present(pmd[i]))				\
			continue;					\
									\
		pte = pte_offset_kernel(pmd + i, 0);			\
		if (i == i_max)						\
			cur_end = end;					\
									\
		ret |= kvm_mips_##name##_pte(pte, start, cur_end);	\
	}								\
	return ret;							\
}									\
									\
static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start,	\
				 unsigned long end)			\
{									\
	int ret = 0;							\
	pmd_t *pmd;							\
	unsigned long cur_end = ~0ul;					\
	int i_min = pud_index(start);					\
	int i_max = pud_index(end);					\
	int i;								\
									\
	for (i = i_min; i <= i_max; ++i, start = 0) {			\
		if (!pud_present(pud[i]))				\
			continue;					\
									\
		pmd = pmd_offset(pud + i, 0);				\
		if (i == i_max)						\
			cur_end = end;					\
									\
		ret |= kvm_mips_##name##_pmd(pmd, start, cur_end);	\
	}								\
	return ret;							\
}									\
									\
static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start,	\
				 unsigned long end)			\
{									\
	int ret = 0;							\
	p4d_t *p4d;							\
	pud_t *pud;							\
	unsigned long cur_end = ~0ul;					\
	int i_min = pgd_index(start);					\
	int i_max = pgd_index(end);					\
	int i;								\
									\
	for (i = i_min; i <= i_max; ++i, start = 0) {			\
		if (!pgd_present(pgd[i]))				\
			continue;					\
									\
		p4d = p4d_offset(pgd, 0);				\
		pud = pud_offset(p4d + i, 0);				\
		if (i == i_max)						\
			cur_end = end;					\
									\
		ret |= kvm_mips_##name##_pud(pud, start, cur_end);	\
	}								\
	return ret;							\
}
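
/*
 * kvm_mips_mkclean_gpa_pt.
 * Mark a range of guest physical address space clean (writes fault) in the
 * VM's GPA page table to allow dirty page tracking.
 */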
BUILD_PTE_RANGE_OP(mkclean, pte_mkclean)
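
/**
 * kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
 * @kvm:	KVM pointer.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Make a range of GPA mappings clean so that guest writes will fault and
 * trigger dirty page logging.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:	Whether any GPA mappings were modified, which would require
 *		derived mappings (GVA page tables & TLB entries) to be
 *		invalidated.
 */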
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
	return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
				    start_gfn << PAGE_SHIFT,
				    end_gfn << PAGE_SHIFT);
}
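
/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks bits set in mask and write protects the associated PTEs. The caller
 * must hold the @kvm->mmu_lock spinlock.
 */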
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	gfn_t base_gfn = slot->base_gfn + gfn_offset;
	gfn_t start = base_gfn + __ffs(mask);
	gfn_t end = base_gfn + __fls(mask);

	kvm_mips_mkclean_gpa_pt(kvm, start, end);
}
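
/*
 * kvm_mips_mkold_gpa_pt.
 * Mark a range of guest physical address space old (all accesses fault) in
 * the VM's GPA page table to allow detection of commonly used pages.
 */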
BUILD_PTE_RANGE_OP(mkold, pte_mkold)

static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
				 gfn_t end_gfn)
{
	return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd,
				  start_gfn << PAGE_SHIFT,
				  end_gfn << PAGE_SHIFT);
}

static int handle_hva_to_gpa(struct kvm *kvm,
			     unsigned long start,
			     unsigned long end,
			     int (*handler)(struct kvm *kvm, gfn_t gfn,
					    gpa_t gfn_end,
					    struct kvm_memory_slot *memslot,
					    void *data),
			     void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int ret = 0;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		ret |= handler(kvm, gfn, gfn_end, memslot, data);
	}

	return ret;
}

static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
				 struct kvm_memory_slot *memslot, void *data)
{
	kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
	return 1;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
			unsigned flags)
{
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);

	kvm_mips_callbacks->flush_shadow_all(kvm);
	return 0;
}

static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
				struct kvm_memory_slot *memslot, void *data)
{
	gpa_t gpa = gfn << PAGE_SHIFT;
	pte_t hva_pte = *(pte_t *)data;
	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
	pte_t old_pte;

	if (!gpa_pte)
		return 0;

	/* Mapping may need adjusting depending on memslot flags */
	old_pte = *gpa_pte;
	if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
		hva_pte = pte_mkclean(hva_pte);
	else if (memslot->flags & KVM_MEM_READONLY)
		hva_pte = pte_wrprotect(hva_pte);

	set_pte(gpa_pte, hva_pte);

	/* Replacing an absent or old page doesn't need flushes */
	if (!pte_present(old_pte) || !pte_young(old_pte))
		return 0;

	/* Pages swapped, aged, moved, or cleaned require flushes */
	return !pte_present(hva_pte) ||
	       !pte_young(hva_pte) ||
	       pte_pfn(old_pte) != pte_pfn(hva_pte) ||
	       (pte_dirty(old_pte) && !pte_dirty(hva_pte));
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	int ret;

	ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
	if (ret)
		kvm_mips_callbacks->flush_shadow_all(kvm);
	return 0;
}

static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
			       struct kvm_memory_slot *memslot, void *data)
{
	return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
}

static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
				    struct kvm_memory_slot *memslot, void *data)
{
	gpa_t gpa = gfn << PAGE_SHIFT;
	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);

	if (!gpa_pte)
		return 0;
	return pte_young(*gpa_pte);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
}
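
/**
 * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
 * @vcpu:		VCPU pointer.
 * @gpa:		Guest physical address of fault.
 * @write_fault:	Whether the fault was due to a write.
 * @out_entry:		New PTE for @gpa (written on success unless NULL).
 * @out_buddy:		New PTE for @gpa's buddy (written on success unless
 *			NULL).
 *
 * Perform fast path GPA fault handling, doing all that can be done without
 * calling into KVM, which on MIPS entails a call into the memory slot code.
 *
 * Returns:	0 on success, in which case we can update derived mappings and
 *		resume guest execution.
 *		-EFAULT on failure due to absent GPA mapping or write to
 *		read-only page, in which case KVM must be consulted.
 */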
static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
				   bool write_fault,
				   pte_t *out_entry, pte_t *out_buddy)
{
	struct kvm *kvm = vcpu->kvm;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	pte_t *ptep;
	kvm_pfn_t pfn = 0;	/* silence bogus GCC warning */
	bool pfn_valid = false;
	int ret = 0;

	spin_lock(&kvm->mmu_lock);

	/* Fast path - just check GPA page table for an existing entry */
	ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
	if (!ptep || !pte_present(*ptep)) {
		ret = -EFAULT;
		goto out;
	}

	/* Track access to pages marked old */
	if (!pte_young(*ptep)) {
		set_pte(ptep, pte_mkyoung(*ptep));
		pfn = pte_pfn(*ptep);
		pfn_valid = true;
		/* call kvm_set_pfn_accessed() after unlock */
	}
	if (write_fault && !pte_dirty(*ptep)) {
		if (!pte_write(*ptep)) {
			ret = -EFAULT;
			goto out;
		}

		/* Track dirtying of writeable pages */
		set_pte(ptep, pte_mkdirty(*ptep));
		pfn = pte_pfn(*ptep);
		mark_page_dirty(kvm, gfn);
		kvm_set_pfn_dirty(pfn);
	}

	if (out_entry)
		*out_entry = *ptep;
	if (out_buddy)
		*out_buddy = *ptep_buddy(ptep);

out:
	spin_unlock(&kvm->mmu_lock);
	if (pfn_valid)
		kvm_set_pfn_accessed(pfn);
	return ret;
}
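
/**
 * kvm_mips_map_page() - Map a guest physical page.
 * @vcpu:		VCPU pointer.
 * @gpa:		Guest physical address of fault.
 * @write_fault:	Whether the fault was due to a write.
 * @out_entry:		New PTE for @gpa (written on success unless NULL).
 * @out_buddy:		New PTE for @gpa's buddy (written on success unless
 *			NULL).
 *
 * Handle GPA faults by creating a new GPA mapping (or updating an existing
 * one).
 *
 * This takes care of marking pages young or dirty (idle/dirty page tracking),
 * asking KVM for the corresponding PFN, and creating a mapping in the GPA
 * page tables. Derived mappings (GVA page tables and TLBs) must be handled by
 * the caller.
 *
 * Returns:	0 on success, in which case the caller may use the @out_entry
 *		and @out_buddy PTEs to update derived mappings and resume guest
 *		execution.
 *		-EFAULT if there is no memory region at @gpa or a write was
 *		attempted to a read-only memory region.
 */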
static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
			     bool write_fault,
			     pte_t *out_entry, pte_t *out_buddy)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int srcu_idx, err;
	kvm_pfn_t pfn;
	pte_t *ptep, entry, old_pte;
	bool writeable;
	unsigned long prot_bits;
	unsigned long mmu_seq;

	/* Try the fast path to handle old / clean pages */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,
				      out_buddy);
	if (!err)
		goto out;

	/* We need a minimum of cached pages ready for page table creation */
	err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
	if (err)
		goto out;

retry:
	/*
	 * Used to check for invalidations in progress, of the pfn that is
	 * returned by gfn_to_pfn_prot() below.
	 */
	mmu_seq = kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads
	 * in gfn_to_pfn_prot() (which calls get_user_pages()), so that we
	 * don't risk the page we get a reference to getting unmapped before
	 * we have a chance to grab the mmu_lock without mmu_notifier_retry()
	 * noticing.
	 *
	 * This smp_rmb() pairs with the effective smp_wmb() of the combination
	 * of the pte_unmap_unlock() after the PTE is zapped, and the
	 * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
	 * mmu_notifier_seq is incremented.
	 */
	smp_rmb();

	/* Slow path - ask KVM core whether we can access this GPA */
	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
	if (is_error_noslot_pfn(pfn)) {
		err = -EFAULT;
		goto out;
	}

	spin_lock(&kvm->mmu_lock);
	/* Check if an invalidation has taken place since we got pfn */
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		/*
		 * This can happen when mappings are changed asynchronously,
		 * but also synchronously if a COW is triggered by
		 * gfn_to_pfn_prot().
		 */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		goto retry;
	}

	/* Ensure page tables are allocated */
	ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);

	/* Set up the PTE */
	prot_bits = _PAGE_PRESENT | __READABLE | _page_cachable_default;
	if (writeable) {
		prot_bits |= _PAGE_WRITE;
		if (write_fault) {
			prot_bits |= __WRITEABLE;
			mark_page_dirty(kvm, gfn);
			kvm_set_pfn_dirty(pfn);
		}
	}
	entry = pfn_pte(pfn, __pgprot(prot_bits));

	/* Write the PTE */
	old_pte = *ptep;
	set_pte(ptep, entry);

	err = 0;
	if (out_entry)
		*out_entry = *ptep;
	if (out_buddy)
		*out_buddy = *ptep_buddy(ptep);

	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	kvm_set_pfn_accessed(pfn);
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
					unsigned long addr)
{
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	pgd_t *pgdp;
	int ret;

	/* We need a minimum of cached pages ready for page table creation */
	ret = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
	if (ret)
		return NULL;

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		pgdp = vcpu->arch.guest_kernel_mm.pgd;
	else
		pgdp = vcpu->arch.guest_user_mm.pgd;

	return kvm_mips_walk_pgd(pgdp, memcache, addr);
}

void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
				  bool user)
{
	pgd_t *pgdp;
	pte_t *ptep;

	addr &= PAGE_MASK << 1;

	pgdp = vcpu->arch.guest_kernel_mm.pgd;
	ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
	if (ptep) {
		ptep[0] = pfn_pte(0, __pgprot(0));
		ptep[1] = pfn_pte(0, __pgprot(0));
	}

	if (user) {
		pgdp = vcpu->arch.guest_user_mm.pgd;
		ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
		if (ptep) {
			ptep[0] = pfn_pte(0, __pgprot(0));
			ptep[1] = pfn_pte(0, __pgprot(0));
		}
	}
}
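
/*
 * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest virtual address space from the VM's GVA page tables.
 */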
static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
				   unsigned long end_gva)
{
	int i_min = pte_index(start_gva);
	int i_max = pte_index(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
	int i;

	/*
	 * There's no freeing to do, so there's no point clearing individual
	 * entries unless only part of the last level page table needs
	 * flushing.
	 */
	if (safe_to_remove)
		return true;

	for (i = i_min; i <= i_max; ++i) {
		if (!pte_present(pte[i]))
			continue;

		set_pte(pte + i, __pte(0));
	}
	return false;
}

static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
				   unsigned long end_gva)
{
	pte_t *pte;
	unsigned long end = ~0ul;
	int i_min = pmd_index(start_gva);
	int i_max = pmd_index(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pmd_present(pmd[i]))
			continue;

		pte = pte_offset_kernel(pmd + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pte(pte, start_gva, end)) {
			pmd_clear(pmd + i);
			pte_free_kernel(NULL, pte);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
				   unsigned long end_gva)
{
	pmd_t *pmd;
	unsigned long end = ~0ul;
	int i_min = pud_index(start_gva);
	int i_max = pud_index(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pud_present(pud[i]))
			continue;

		pmd = pmd_offset(pud + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) {
			pud_clear(pud + i);
			pmd_free(NULL, pmd);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
				   unsigned long end_gva)
{
	p4d_t *p4d;
	pud_t *pud;
	unsigned long end = ~0ul;
	int i_min = pgd_index(start_gva);
	int i_max = pgd_index(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pgd_present(pgd[i]))
			continue;

		p4d = p4d_offset(pgd, 0);
		pud = pud_offset(p4d + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pud(pud, start_gva, end)) {
			pgd_clear(pgd + i);
			pud_free(NULL, pud);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags)
{
	if (flags & KMF_GPA) {
		/* all of guest virtual address space could be affected */
		if (flags & KMF_KERN)
			/* useg, kseg0, seg2/3 */
			kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff);
		else
			/* useg */
			kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
	} else {
		/* useg only */
		kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);

		/* kseg2/3 */
		if (flags & KMF_KERN)
			kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff);
	}
}

static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte)
{
	/*
	 * Don't leak writeable but clean entries from GPA page tables. We
	 * don't want the normal Linux tlbmod handler to handle dirtying when
	 * KVM accesses guest memory.
	 */
	if (!pte_dirty(pte))
		pte = pte_wrprotect(pte);

	return pte;
}

static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
{
	/* Guest EntryLo overrides host EntryLo */
	if (!(entrylo & ENTRYLO_D))
		pte = pte_mkclean(pte);

	return kvm_mips_gpa_pte_to_gva_unmapped(pte);
}

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu,
				      bool write_fault)
{
	int ret;

	ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);
	if (ret)
		return ret;

	/* Invalidate this entry in the TLB */
	return kvm_vz_host_tlb_inv(vcpu, badvaddr);
}
#endif

/* Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu,
				    bool write_fault)
{
	unsigned long gpa;
	pte_t pte_gpa[2], *ptep_gva;
	int idx;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	/* Get the GPA page table entry */
	gpa = KVM_GUEST_CPHYSADDR(badvaddr);
	idx = (badvaddr >> PAGE_SHIFT) & 1;
	if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx],
			      &pte_gpa[!idx]) < 0)
		return -1;

	/* Get the GVA page table entry */
	ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE);
	if (!ptep_gva) {
		kvm_err("No ptep for gva %lx\n", badvaddr);
		return -1;
	}

	/* Copy a pair of entries from GPA page table to GVA page table */
	ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]);
	ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]);

	/* Invalidate this entry in the TLB, guest kernel ASID only */
	kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
	return 0;
}

int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long gva,
					 bool write_fault)
{
	struct kvm *kvm = vcpu->kvm;
	long tlb_lo[2];
	pte_t pte_gpa[2], *ptep_buddy, *ptep_gva;
	unsigned int idx = TLB_LO_IDX(*tlb, gva);
	bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);

	tlb_lo[0] = tlb->tlb_lo[0];
	tlb_lo[1] = tlb->tlb_lo[1];

	/*
	 * The commpage address must not be mapped to anything else if the
	 * guest TLB contains entries nearby, or commpage accesses will break.
	 */
	if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
		tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0;

	/* Get the GPA page table entry */
	if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]),
			      write_fault, &pte_gpa[idx], NULL) < 0)
		return -1;

	/* And its GVA buddy's GPA page table entry if it also exists */
	pte_gpa[!idx] = pfn_pte(0, __pgprot(0));
	if (tlb_lo[!idx] & ENTRYLO_V) {
		spin_lock(&kvm->mmu_lock);
		ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL,
					mips3_tlbpfn_to_paddr(tlb_lo[!idx]));
		if (ptep_buddy)
			pte_gpa[!idx] = *ptep_buddy;
		spin_unlock(&kvm->mmu_lock);
	}

	/* Get the GVA page table entry pair */
	ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);
	if (!ptep_gva) {
		kvm_err("No ptep for gva %lx\n", gva);
		return -1;
	}

	/* Copy a pair of entries from GPA page table to GVA page table */
	ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]);
	ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]);

	/* Invalidate this entry in the TLB, current guest mode ASID only */
	kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo[0], tlb->tlb_lo[1]);

	return 0;
}

int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn;
	pte_t *ptep;
	pgprot_t prot;

	ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr);
	if (!ptep) {
		kvm_err("No ptep for commpage %lx\n", badvaddr);
		return -1;
	}

	pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
	/* Also set valid and dirty, so refill handler doesn't have to */
	prot = vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED);
	*ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, prot)));

	/* Invalidate this entry in the TLB, guest kernel ASID only */
	kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
	return 0;
}
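
/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate a timer interrupt from a different CPU to the current CPU by
 * cancelling and restarting it if it has previously been running on the
 * other CPU.
 *
 * Must be called when the VCPU is migrated to a different CPU, so that the
 * timer can interrupt the guest at the new minimum time instead of the old
 * one.
 */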
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	local_irq_save(flags);

	vcpu->cpu = cpu;
	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_load(vcpu, cpu);

	local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	vcpu->arch.last_sched_cpu = cpu;
	vcpu->cpu = -1;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_put(vcpu, cpu);

	local_irq_restore(flags);
}
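
/**
 * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault.
 * @vcpu:	Virtual CPU.
 * @gva:	Guest virtual address to be accessed.
 * @write:	True if write attempted (must be dirtied and made writable).
 *
 * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and
 * dirtying the page if @write so that guest instructions may be modified.
 *
 * Returns:	KVM_MIPS_MAPPED on success.
 *		KVM_MIPS_GVA if bad guest virtual address.
 *		KVM_MIPS_GPA if bad guest physical address.
 *		KVM_MIPS_TLB if guest TLB not present.
 *		KVM_MIPS_TLBINV if guest TLB present but not valid.
 *		KVM_MIPS_TLBMOD if guest TLB read only.
 */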
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
						   unsigned long gva,
						   bool write)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb;
	int index;

	if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)
			return KVM_MIPS_GPA;
	} else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
		/* Address should be in the guest TLB */
		index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
			  (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
		if (index < 0)
			return KVM_MIPS_TLB;
		tlb = &vcpu->arch.guest_tlb[index];

		/* Entry should be valid, and dirty for writes */
		if (!TLB_IS_VALID(*tlb, gva))
			return KVM_MIPS_TLBINV;
		if (write && !TLB_IS_DIRTY(*tlb, gva))
			return KVM_MIPS_TLBMOD;

		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write))
			return KVM_MIPS_GPA;
	} else {
		return KVM_MIPS_GVA;
	}

	return KVM_MIPS_MAPPED;
}

int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	int err;

	if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ),
		 "Expect BadInstr/BadInstrP registers to be used with VZ\n"))
		return -EINVAL;

retry:
	kvm_trap_emul_gva_lockless_begin(vcpu);
	err = get_user(*out, opc);
	kvm_trap_emul_gva_lockless_end(vcpu);

	if (unlikely(err)) {
		/*
		 * Try to handle the fault, maybe we just raced with a GVA
		 * invalidation.
		 */
		err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc,
					      false);
		if (unlikely(err)) {
			kvm_err("%s: illegal address: %p\n",
				__func__, opc);
			return -EFAULT;
		}

		/* Hopefully the access was successful */
		goto retry;
	}
	return 0;
}