/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
 * for which pages need to be cached.
 */
#if defined(__PAGETABLE_PMD_FOLDED)
#define KVM_MMU_CACHE_MIN_PAGES 1
#else
#define KVM_MMU_CACHE_MIN_PAGES 2
#endif
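
/*
 * Editor's note (illustrative): kvm_mips_walk_pgd() below allocates at most
 * one page per translation level beneath the PGD -- a PMD table plus a PTE
 * table on three-level configurations, or just a PTE table when the PMD
 * level is folded -- which is why a minimum of 2 (or 1) cached pages
 * suffices.
 */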

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
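
/*
 * Illustrative sketch (editor's addition, not upstream code): the intended
 * usage pattern for the cache helpers above. The cache is topped up with
 * sleeping GFP_KERNEL allocations *before* the non-sleeping mmu_lock is
 * taken, so that mmu_memory_cache_alloc() can then hand out pages atomically
 * during a page table walk. The surrounding function is hypothetical.
 */
#if 0
static int example_fault_path(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	int err;

	/* May sleep; must happen outside the spinlock */
	err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
				     KVM_NR_MEM_OBJS);
	if (err)
		return err;

	spin_lock(&vcpu->kvm->mmu_lock);
	/* Any intermediate tables are now allocated from the cache */
	kvm_mips_pte_for_gpa(vcpu->kvm, memcache, gpa);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return 0;
}
#endif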

/**
 * kvm_pgd_init() - Initialise KVM GPA page directory.
 * @page:	Pointer to page directory (PGD) for KVM GPA.
 *
 * Initialise a KVM GPA page directory with pointers to the invalid table, i.e.
 * representing no mappings. This is similar to pgd_init(), however it
 * initialises all the page directory pointers, not just the ones corresponding
 * to the userland address space (since it is for the guest physical address
 * space rather than a virtual address space).
 */
static void kvm_pgd_init(void *page)
{
	unsigned long *p, *end;
	unsigned long entry;

#ifdef __PAGETABLE_PMD_FOLDED
	entry = (unsigned long)invalid_pte_table;
#else
	entry = (unsigned long)invalid_pmd_table;
#endif

	p = (unsigned long *)page;
	end = p + PTRS_PER_PGD;

	do {
		p[0] = entry;
		p[1] = entry;
		p[2] = entry;
		p[3] = entry;
		p[4] = entry;
		p += 8;
		p[-3] = entry;
		p[-2] = entry;
		p[-1] = entry;
	} while (p != end);
}
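
/*
 * Editor's note (illustrative): the unrolled loop above fills eight entries
 * per iteration -- p[0]..p[4] before the pointer bump, then p[5]..p[7] via
 * negative offsets after it. Since PTRS_PER_PGD is a multiple of eight here,
 * it is behaviourally equivalent to:
 */
#if 0
	for (p = (unsigned long *)page; p != end; ++p)
		*p = entry;
#endif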

/**
 * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
 *
 * Allocate a blank KVM GPA page directory (PGD) for representing guest
 * physical addresses, with pointers to the invalid table.
 *
 * Returns:	Pointer to new KVM GPA page directory.
 *		NULL on allocation failure.
 */
pgd_t *kvm_pgd_alloc(void)
{
	pgd_t *ret;

	ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret)
		kvm_pgd_init(ret);

	return ret;
}
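
/*
 * Illustrative sketch (editor's addition): the PGD above is an order
 * PGD_ORDER page allocation, so teardown pairs it with free_pages(), as is
 * done for kvm->arch.gpa_mm.pgd when the VM is destroyed. The helper name
 * here is hypothetical.
 */
#if 0
static void example_pgd_free(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ORDER);
}
#endif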

/**
 * kvm_mips_walk_pgd() - Walk page table with optional allocation.
 * @pgd:	Page directory pointer.
 * @addr:	Address to index page table using.
 * @cache:	MMU page cache to allocate new page tables from, or NULL.
 *
 * Walk the page tables pointed to by @pgd to find the PTE corresponding to the
 * address @addr. If page tables don't exist, they are created from the MMU
 * cache if @cache is not NULL, otherwise NULL is returned.
 *
 * Returns:	Pointer to pte_t corresponding to @addr.
 *		NULL if a page table doesn't exist for @addr and !@cache.
 *		NULL if a page table allocation failed.
 */
static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
				unsigned long addr)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd += pgd_index(addr);
	if (pgd_none(*pgd)) {
		/* Not used on MIPS yet */
		BUG();
		return NULL;
	}
	p4d = p4d_offset(pgd, addr);
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		pmd_t *new_pmd;

		if (!cache)
			return NULL;
		new_pmd = mmu_memory_cache_alloc(cache);
		pmd_init((unsigned long)new_pmd,
			 (unsigned long)invalid_pte_table);
		pud_populate(NULL, pud, new_pmd);
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		pte_t *new_pte;

		if (!cache)
			return NULL;
		new_pte = mmu_memory_cache_alloc(cache);
		clear_page(new_pte);
		pmd_populate_kernel(NULL, pmd, new_pte);
	}
	return pte_offset(pmd, addr);
}

/* Caller must hold kvm->mmu_lock */
static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
				   struct kvm_mmu_memory_cache *cache,
				   unsigned long addr)
{
	return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
}
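
/*
 * Editor's note: passing a NULL @cache makes the walk above a pure lookup;
 * e.g. kvm_mips_pte_for_gpa(kvm, NULL, gpa) returns NULL instead of
 * allocating when an intermediate table is absent, which is how the MMU
 * notifier handlers later in this file probe for existing mappings.
 */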

/*
 * kvm_mips_flush_gpa_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest physical address space from the VM's GPA page tables.
 */

static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	int i_min = __pte_offset(start_gpa);
	int i_max = __pte_offset(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
	int i;

	for (i = i_min; i <= i_max; ++i) {
		if (!pte_present(pte[i]))
			continue;

		set_pte(pte + i, __pte(0));
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	pte_t *pte;
	unsigned long end = ~0ul;
	int i_min = pmd_index(start_gpa);
	int i_max = pmd_index(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
		if (!pmd_present(pmd[i]))
			continue;

		pte = pte_offset(pmd + i, 0);
		if (i == i_max)
			end = end_gpa;

		if (kvm_mips_flush_gpa_pte(pte, start_gpa, end)) {
			pmd_clear(pmd + i);
			pte_free_kernel(NULL, pte);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	pmd_t *pmd;
	unsigned long end = ~0ul;
	int i_min = pud_index(start_gpa);
	int i_max = pud_index(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
		if (!pud_present(pud[i]))
			continue;

		pmd = pmd_offset(pud + i, 0);
		if (i == i_max)
			end = end_gpa;

		if (kvm_mips_flush_gpa_pmd(pmd, start_gpa, end)) {
			pud_clear(pud + i);
			pmd_free(NULL, pmd);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	p4d_t *p4d;
	pud_t *pud;
	unsigned long end = ~0ul;
	int i_min = pgd_index(start_gpa);
	int i_max = pgd_index(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
		if (!pgd_present(pgd[i]))
			continue;

		p4d = p4d_offset(pgd, 0);
		pud = pud_offset(p4d + i, 0);
		if (i == i_max)
			end = end_gpa;

		if (kvm_mips_flush_gpa_pud(pud, start_gpa, end)) {
			pgd_clear(pgd + i);
			pud_free(NULL, pud);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

/**
 * kvm_mips_flush_gpa_pt() - Flush a range of guest physical addresses.
 * @kvm:	KVM pointer.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Flushes a range of GPA mappings from the GPA page tables.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:	Whether it's safe to remove the top level page directory because
 *		all lower levels have been removed.
 */
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
	return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd,
				      start_gfn << PAGE_SHIFT,
				      end_gfn << PAGE_SHIFT);
}
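
/*
 * Illustrative sketch (editor's addition): flushing every GPA mapping of a
 * memslot. Upstream does the equivalent when a memslot goes away; the helper
 * name here is hypothetical. Note the GFN range is inclusive, so the last
 * GFN of the slot is base_gfn + npages - 1.
 */
#if 0
static void example_flush_memslot(struct kvm *kvm,
				  struct kvm_memory_slot *slot)
{
	spin_lock(&kvm->mmu_lock);
	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
			      slot->base_gfn + slot->npages - 1);
	spin_unlock(&kvm->mmu_lock);
}
#endif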

#define BUILD_PTE_RANGE_OP(name, op)					\
static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start,	\
				 unsigned long end)			\
{									\
	int ret = 0;							\
	int i_min = __pte_offset(start);				\
	int i_max = __pte_offset(end);					\
	int i;								\
	pte_t old, new;							\
									\
	for (i = i_min; i <= i_max; ++i) {				\
		if (!pte_present(pte[i]))				\
			continue;					\
									\
		old = pte[i];						\
		new = op(old);						\
		if (pte_val(new) == pte_val(old))			\
			continue;					\
		set_pte(pte + i, new);					\
		ret = 1;						\
	}								\
	return ret;							\
}									\
									\
static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start,	\
				 unsigned long end)			\
{									\
	int ret = 0;							\
	pte_t *pte;							\
	unsigned long cur_end = ~0ul;					\
	int i_min = pmd_index(start);					\
	int i_max = pmd_index(end);					\
	int i;								\
									\
	for (i = i_min; i <= i_max; ++i, start = 0) {			\
		if (!pmd_present(pmd[i]))				\
			continue;					\
									\
		pte = pte_offset(pmd + i, 0);				\
		if (i == i_max)						\
			cur_end = end;					\
									\
		ret |= kvm_mips_##name##_pte(pte, start, cur_end);	\
	}								\
	return ret;							\
}									\
									\
static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start,	\
				 unsigned long end)			\
{									\
	int ret = 0;							\
	pmd_t *pmd;							\
	unsigned long cur_end = ~0ul;					\
	int i_min = pud_index(start);					\
	int i_max = pud_index(end);					\
	int i;								\
									\
	for (i = i_min; i <= i_max; ++i, start = 0) {			\
		if (!pud_present(pud[i]))				\
			continue;					\
									\
		pmd = pmd_offset(pud + i, 0);				\
		if (i == i_max)						\
			cur_end = end;					\
									\
		ret |= kvm_mips_##name##_pmd(pmd, start, cur_end);	\
	}								\
	return ret;							\
}									\
									\
static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start,	\
				 unsigned long end)			\
{									\
	int ret = 0;							\
	p4d_t *p4d;							\
	pud_t *pud;							\
	unsigned long cur_end = ~0ul;					\
	int i_min = pgd_index(start);					\
	int i_max = pgd_index(end);					\
	int i;								\
									\
	for (i = i_min; i <= i_max; ++i, start = 0) {			\
		if (!pgd_present(pgd[i]))				\
			continue;					\
									\
		p4d = p4d_offset(pgd, 0);				\
		pud = pud_offset(p4d + i, 0);				\
		if (i == i_max)						\
			cur_end = end;					\
									\
		ret |= kvm_mips_##name##_pud(pud, start, cur_end);	\
	}								\
	return ret;							\
}
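
/*
 * Editor's note: each BUILD_PTE_RANGE_OP(name, op) instantiation defines
 * four level walkers, kvm_mips_<name>_pte/_pmd/_pud/_pgd, which apply @op to
 * every present PTE in a range and return whether any entry was changed.
 * For example, BUILD_PTE_RANGE_OP(mkclean, pte_mkclean) below generates
 * kvm_mips_mkclean_pgd(), the entry point used by kvm_mips_mkclean_gpa_pt().
 */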

/*
 * kvm_mips_mkclean_gpa_pt.
 * Mark a range of guest physical address space clean (writes fault) in the VM's
 * GPA page table to allow dirty page tracking.
 */

BUILD_PTE_RANGE_OP(mkclean, pte_mkclean)

/**
 * kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
 * @kvm:	KVM pointer.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Make a range of GPA mappings clean so that guest writes will fault and
 * trigger dirty page logging.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:	Whether any GPA mappings were modified, which would require
 *		derived mappings (GVA page tables & TLB entries) to be
 *		invalidated.
 */
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
	return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
				    start_gfn << PAGE_SHIFT,
				    end_gfn << PAGE_SHIFT);
}

/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks bits set in mask and write protects the associated PTEs. Caller must
 * acquire @kvm->mmu_lock.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask)
{
	gfn_t base_gfn = slot->base_gfn + gfn_offset;
	gfn_t start = base_gfn + __ffs(mask);
	gfn_t end = base_gfn + __fls(mask);

	kvm_mips_mkclean_gpa_pt(kvm, start, end);
}
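
/*
 * Editor's note (worked example): with gfn_offset 0 and mask 0x0f00,
 * __ffs(mask) = 8 and __fls(mask) = 11, so GFNs base_gfn + 8 through
 * base_gfn + 11 are made clean. The whole contiguous span between the lowest
 * and highest set bits is write protected, including any clear bits in
 * between.
 */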

/*
 * kvm_mips_mkold_gpa_pt.
 * Mark a range of guest physical address space old (all accesses fault) in the
 * VM's GPA page table to allow detection of commonly used pages.
 */

BUILD_PTE_RANGE_OP(mkold, pte_mkold)

static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
				 gfn_t end_gfn)
{
	return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd,
				  start_gfn << PAGE_SHIFT,
				  end_gfn << PAGE_SHIFT);
}

static int handle_hva_to_gpa(struct kvm *kvm,
			     unsigned long start,
			     unsigned long end,
			     int (*handler)(struct kvm *kvm, gfn_t gfn,
					    gpa_t gfn_end,
					    struct kvm_memory_slot *memslot,
					    void *data),
			     void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int ret = 0;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		ret |= handler(kvm, gfn, gfn_end, memslot, data);
	}

	return ret;
}
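
/*
 * Editor's note: each memslot maps HVAs [userspace_addr, userspace_addr +
 * (npages << PAGE_SHIFT)) onto GFNs starting at base_gfn; the requested HVA
 * range is clamped to each overlapping slot above before being converted to
 * the GFN range passed to @handler, and the handlers' return values are OR'd
 * together.
 */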

static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
				 struct kvm_memory_slot *memslot, void *data)
{
	kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
	return 1;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);

	kvm_mips_callbacks->flush_shadow_all(kvm);
	return 0;
}

static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
				struct kvm_memory_slot *memslot, void *data)
{
	gpa_t gpa = gfn << PAGE_SHIFT;
	pte_t hva_pte = *(pte_t *)data;
	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
	pte_t old_pte;

	if (!gpa_pte)
		return 0;

	/* Mapping may need adjusting depending on memslot flags */
	old_pte = *gpa_pte;
	if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
		hva_pte = pte_mkclean(hva_pte);
	else if (memslot->flags & KVM_MEM_READONLY)
		hva_pte = pte_wrprotect(hva_pte);

	set_pte(gpa_pte, hva_pte);

	/* Replacing an absent or old page doesn't need flushes */
	if (!pte_present(old_pte) || !pte_young(old_pte))
		return 0;

	/* Pages swapped, aged, moved, or cleaned require flushes */
	return !pte_present(hva_pte) ||
	       !pte_young(hva_pte) ||
	       pte_pfn(old_pte) != pte_pfn(hva_pte) ||
	       (pte_dirty(old_pte) && !pte_dirty(hva_pte));
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	int ret;

	ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
	if (ret)
		kvm_mips_callbacks->flush_shadow_all(kvm);
	return 0;
}

static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
			       struct kvm_memory_slot *memslot, void *data)
{
	return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
}

static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
				    struct kvm_memory_slot *memslot, void *data)
{
	gpa_t gpa = gfn << PAGE_SHIFT;
	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);

	if (!gpa_pte)
		return 0;
	return pte_young(*gpa_pte);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
}

/**
 * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
 * @vcpu:		VCPU pointer.
 * @gpa:		Guest physical address of fault.
 * @write_fault:	Whether the fault was due to a write.
 * @out_entry:		New PTE for @gpa (written on success unless NULL).
 * @out_buddy:		New PTE for @gpa's buddy (written on success unless
 *			NULL).
 *
 * Perform fast path GPA fault handling, doing all that can be done without
 * calling into KVM. This handles marking old pages young (for idle page
 * tracking), and dirtying of clean pages (for dirty page logging).
 *
 * Returns:	0 on success, in which case we can update derived mappings and
 *		resume guest execution.
 *		-EFAULT on failure due to absent GPA mapping or write to
 *		read-only page, in which case KVM must be consulted.
 */
static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
				   bool write_fault,
				   pte_t *out_entry, pte_t *out_buddy)
{
	struct kvm *kvm = vcpu->kvm;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	pte_t *ptep;
	kvm_pfn_t pfn = 0;	/* silence bogus GCC warning */
	bool pfn_valid = false;
	int ret = 0;

	spin_lock(&kvm->mmu_lock);

	/* Fast path - just check GPA page table for an existing entry */
	ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
	if (!ptep || !pte_present(*ptep)) {
		ret = -EFAULT;
		goto out;
	}

	/* Track access to pages marked old */
	if (!pte_young(*ptep)) {
		set_pte(ptep, pte_mkyoung(*ptep));
		pfn = pte_pfn(*ptep);
		pfn_valid = true;
		/* call kvm_set_pfn_accessed() after unlock */
	}
	if (write_fault && !pte_dirty(*ptep)) {
		if (!pte_write(*ptep)) {
			ret = -EFAULT;
			goto out;
		}

		/* Track dirtying of writeable pages */
		set_pte(ptep, pte_mkdirty(*ptep));
		pfn = pte_pfn(*ptep);
		mark_page_dirty(kvm, gfn);
		kvm_set_pfn_dirty(pfn);
	}

	if (out_entry)
		*out_entry = *ptep;
	if (out_buddy)
		*out_buddy = *ptep_buddy(ptep);

out:
	spin_unlock(&kvm->mmu_lock);
	if (pfn_valid)
		kvm_set_pfn_accessed(pfn);
	return ret;
}

/**
 * kvm_mips_map_page() - Map a guest physical page.
 * @vcpu:		VCPU pointer.
 * @gpa:		Guest physical address of fault.
 * @write_fault:	Whether the fault was due to a write.
 * @out_entry:		New PTE for @gpa (written on success unless NULL).
 * @out_buddy:		New PTE for @gpa's buddy (written on success unless
 *			NULL).
 *
 * Handle GPA faults by creating a new GPA mapping (or updating an existing
 * one).
 *
 * This takes care of marking pages young or dirty (idle/dirty page tracking),
 * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
 * tables. Derived mappings (GVA page tables and TLBs) must be handled by the
 * caller.
 *
 * Returns:	0 on success, in which case the caller may use the @out_entry
 *		and @out_buddy PTEs to update derived mappings and resume guest
 *		execution.
 *		-EFAULT if there is no memory region at @gpa or a write was
 *		attempted to a read-only memory region. This is usually handled
 *		as an MMIO access.
 */
static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
			     bool write_fault,
			     pte_t *out_entry, pte_t *out_buddy)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int srcu_idx, err;
	kvm_pfn_t pfn;
	pte_t *ptep, entry, old_pte;
	bool writeable;
	unsigned long prot_bits;
	unsigned long mmu_seq;

	/* Try the fast path to handle old / clean pages */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,
				      out_buddy);
	if (!err)
		goto out;

	/* We need a minimum of cached pages ready for page table creation */
	err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
				     KVM_NR_MEM_OBJS);
	if (err)
		goto out;

retry:
	/*
	 * Used to check for invalidations in progress, of the pfn that is
	 * returned by gfn_to_pfn_prot below.
	 */
	mmu_seq = kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
	 * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
	 * risk the page we get a reference to getting unmapped before we have
	 * a chance to grab the mmu_lock without mmu_notifier_retry() noticing.
	 *
	 * This smp_rmb() pairs with the effective smp_wmb() of the combination
	 * of the pte_unmap_unlock() after the PTE is zapped, and the
	 * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
	 * mmu_notifier_seq is incremented.
	 */
	smp_rmb();

	/* Slow path - ask KVM core whether we can access this GPA */
	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
	if (is_error_noslot_pfn(pfn)) {
		err = -EFAULT;
		goto out;
	}

	spin_lock(&kvm->mmu_lock);
	/* Check if an invalidation has taken place since we got pfn */
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		/*
		 * This can happen when mappings are changed asynchronously, but
		 * also synchronously if a COW is triggered by
		 * gfn_to_pfn_prot().
		 */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		goto retry;
	}

	/* Ensure page tables are allocated */
	ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);

	/* Set up the PTE */
	prot_bits = _PAGE_PRESENT | __READABLE | _page_cachable_default;
	if (writeable) {
		prot_bits |= _PAGE_WRITE;
		if (write_fault) {
			prot_bits |= __WRITEABLE;
			mark_page_dirty(kvm, gfn);
			kvm_set_pfn_dirty(pfn);
		}
	}
	entry = pfn_pte(pfn, __pgprot(prot_bits));

	/* Write the PTE */
	old_pte = *ptep;
	set_pte(ptep, entry);

	err = 0;
	if (out_entry)
		*out_entry = *ptep;
	if (out_buddy)
		*out_buddy = *ptep_buddy(ptep);

	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	kvm_set_pfn_accessed(pfn);
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}
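
/*
 * Editor's note: the retry protocol in kvm_mips_map_page() is the standard
 * KVM MMU notifier pattern -- (1) snapshot kvm->mmu_notifier_seq, (2)
 * translate the GFN to a PFN (which may sleep and fault pages in), (3) take
 * mmu_lock, and (4) if mmu_notifier_retry() reports that the sequence
 * changed, a concurrent invalidation may have made the PFN stale, so release
 * it and start again.
 */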

static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
					unsigned long addr)
{
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	pgd_t *pgdp;
	int ret;

	/* We need a minimum of cached pages ready for page table creation */
	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
				     KVM_NR_MEM_OBJS);
	if (ret)
		return NULL;

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		pgdp = vcpu->arch.guest_kernel_mm.pgd;
	else
		pgdp = vcpu->arch.guest_user_mm.pgd;

	return kvm_mips_walk_pgd(pgdp, memcache, addr);
}

void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
				  bool user)
{
	pgd_t *pgdp;
	pte_t *ptep;

	addr &= PAGE_MASK << 1;

	pgdp = vcpu->arch.guest_kernel_mm.pgd;
	ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
	if (ptep) {
		ptep[0] = pfn_pte(0, __pgprot(0));
		ptep[1] = pfn_pte(0, __pgprot(0));
	}

	if (user) {
		pgdp = vcpu->arch.guest_user_mm.pgd;
		ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
		if (ptep) {
			ptep[0] = pfn_pte(0, __pgprot(0));
			ptep[1] = pfn_pte(0, __pgprot(0));
		}
	}
}
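
/*
 * Editor's note: "addr &= PAGE_MASK << 1" above rounds down to an even/odd
 * page pair boundary, matching the two output pages (EntryLo0/EntryLo1) of a
 * single MIPS TLB entry, which is why both PTEs of the pair are cleared
 * together.
 */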

/*
 * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest virtual address space from the VM's GVA page tables.
 */

static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
				   unsigned long end_gva)
{
	int i_min = __pte_offset(start_gva);
	int i_max = __pte_offset(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
	int i;

	/*
	 * There's no freeing to do, so there's no point clearing individual
	 * entries unless only part of the last level page table needs flushing.
	 */
	if (safe_to_remove)
		return true;

	for (i = i_min; i <= i_max; ++i) {
		if (!pte_present(pte[i]))
			continue;

		set_pte(pte + i, __pte(0));
	}
	return false;
}

static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
				   unsigned long end_gva)
{
	pte_t *pte;
	unsigned long end = ~0ul;
	int i_min = pmd_index(start_gva);
	int i_max = pmd_index(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pmd_present(pmd[i]))
			continue;

		pte = pte_offset(pmd + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pte(pte, start_gva, end)) {
			pmd_clear(pmd + i);
			pte_free_kernel(NULL, pte);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
				   unsigned long end_gva)
{
	pmd_t *pmd;
	unsigned long end = ~0ul;
	int i_min = pud_index(start_gva);
	int i_max = pud_index(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pud_present(pud[i]))
			continue;

		pmd = pmd_offset(pud + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) {
			pud_clear(pud + i);
			pmd_free(NULL, pmd);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
				   unsigned long end_gva)
{
	p4d_t *p4d;
	pud_t *pud;
	unsigned long end = ~0ul;
	int i_min = pgd_index(start_gva);
	int i_max = pgd_index(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pgd_present(pgd[i]))
			continue;

		p4d = p4d_offset(pgd, 0);
		pud = pud_offset(p4d + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pud(pud, start_gva, end)) {
			pgd_clear(pgd + i);
			pud_free(NULL, pud);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags)
{
	if (flags & KMF_GPA) {
		/* all of guest virtual address space could be affected */
		if (flags & KMF_KERN)
			/* useg, kseg0, seg2/3 */
			kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff);
		else
			/* useg */
			kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
	} else {
		/* useg */
		kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);

		/* kseg2/3 */
		if (flags & KMF_KERN)
			kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff);
	}
}
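
/*
 * Editor's note: the constants above follow the trap & emulate guest layout;
 * guest useg occupies GVAs [0x00000000, 0x3fffffff], while the wider flushes
 * up to 0x7fffffff also cover the segments used for guest kernel mappings,
 * with [0x60000000, 0x7fffffff] corresponding to kseg2/3.
 */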

static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte)
{
	/*
	 * Don't leak writeable but clean entries from GPA page tables. We don't
	 * want the normal Linux tlbmod handler to handle dirtying when KVM
	 * accesses guest memory.
	 */
	if (!pte_dirty(pte))
		pte = pte_wrprotect(pte);

	return pte;
}

static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
{
	/* Guest EntryLo overrides host EntryLo */
	if (!(entrylo & ENTRYLO_D))
		pte = pte_mkclean(pte);

	return kvm_mips_gpa_pte_to_gva_unmapped(pte);
}

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu,
				      bool write_fault)
{
	int ret;

	ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);
	if (ret)
		return ret;

	/* Invalidate this entry in the TLB */
	return kvm_vz_host_tlb_inv(vcpu, badvaddr);
}
#endif

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu,
				    bool write_fault)
{
	unsigned long gpa;
	pte_t pte_gpa[2], *ptep_gva;
	int idx;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	/* Get the GPA page table entry */
	gpa = KVM_GUEST_CPHYSADDR(badvaddr);
	idx = (badvaddr >> PAGE_SHIFT) & 1;
	if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx],
			      &pte_gpa[!idx]) < 0)
		return -1;

	/* Get the GVA page table entry */
	ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE);
	if (!ptep_gva) {
		kvm_err("No ptep for gva %lx\n", badvaddr);
		return -1;
	}

	/* Copy a pair of entries from GPA page table to GVA page table */
	ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]);
	ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]);

	/* Invalidate this entry in the TLB, guest kernel ASID only */
	kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
	return 0;
}

int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long gva,
					 bool write_fault)
{
	struct kvm *kvm = vcpu->kvm;
	long tlb_lo[2];
	pte_t pte_gpa[2], *ptep_buddy, *ptep_gva;
	unsigned int idx = TLB_LO_IDX(*tlb, gva);
	bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);

	tlb_lo[0] = tlb->tlb_lo[0];
	tlb_lo[1] = tlb->tlb_lo[1];

	/*
	 * The commpage address must not be mapped to anything else if the
	 * guest TLB contains entries nearby, or commpage accesses will break.
	 */
	if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
		tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0;

	/* Get the GPA page table entry */
	if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]),
			      write_fault, &pte_gpa[idx], NULL) < 0)
		return -1;

	/* And its GVA buddy's GPA page table entry if it also exists */
	pte_gpa[!idx] = pfn_pte(0, __pgprot(0));
	if (tlb_lo[!idx] & ENTRYLO_V) {
		spin_lock(&kvm->mmu_lock);
		ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL,
					mips3_tlbpfn_to_paddr(tlb_lo[!idx]));
		if (ptep_buddy)
			pte_gpa[!idx] = *ptep_buddy;
		spin_unlock(&kvm->mmu_lock);
	}

	/* Get the GVA page table entry pair */
	ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);
	if (!ptep_gva) {
		kvm_err("No ptep for gva %lx\n", gva);
		return -1;
	}

	/* Copy a pair of entries from GPA page table to GVA page table */
	ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]);
	ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]);

	/* Invalidate this entry in the TLB, current guest mode ASID only */
	kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo[0], tlb->tlb_lo[1]);

	return 0;
}

int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn;
	pte_t *ptep;

	ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr);
	if (!ptep) {
		kvm_err("No ptep for commpage %lx\n", badvaddr);
		return -1;
	}

	pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
	/* Also set valid and dirty, so refill handler doesn't have to */
	*ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, PAGE_SHARED)));

	/* Invalidate this entry in the TLB, guest kernel ASID only */
	kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
	return 0;
}

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	local_irq_save(flags);

	vcpu->cpu = cpu;
	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest exit to avoid latency.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_load(vcpu, cpu);

	local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	vcpu->arch.last_sched_cpu = cpu;
	vcpu->cpu = -1;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_put(vcpu, cpu);

	local_irq_restore(flags);
}

/**
 * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault.
 * @vcpu:	Virtual CPU.
 * @gva:	Guest virtual address to be accessed.
 * @write:	True if write attempted (must be dirtied and made writable).
 *
 * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and
 * dirtying the page if @write so that guest instructions may be modified.
 *
 * Returns:	KVM_MIPS_MAPPED on success.
 *		KVM_MIPS_GVA if bad guest virtual address.
 *		KVM_MIPS_GPA if bad guest physical address.
 *		KVM_MIPS_TLB if guest TLB not present.
 *		KVM_MIPS_TLBINV if guest TLB present but not valid.
 *		KVM_MIPS_TLBMOD if guest TLB read only.
 */
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
						   unsigned long gva,
						   bool write)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb;
	int index;

	if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)
			return KVM_MIPS_GPA;
	} else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
		/* Address should be in the guest TLB */
		index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
			  (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
		if (index < 0)
			return KVM_MIPS_TLB;
		tlb = &vcpu->arch.guest_tlb[index];

		/* Entry should be valid, and dirty for writes */
		if (!TLB_IS_VALID(*tlb, gva))
			return KVM_MIPS_TLBINV;
		if (write && !TLB_IS_DIRTY(*tlb, gva))
			return KVM_MIPS_TLBMOD;

		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write))
			return KVM_MIPS_GPA;
	} else {
		return KVM_MIPS_GVA;
	}

	return KVM_MIPS_MAPPED;
}

int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	int err;

	if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ),
		 "Expect BadInstr/BadInstrP registers to be used with VZ\n"))
		return -EINVAL;

retry:
	kvm_trap_emul_gva_lockless_begin(vcpu);
	err = get_user(*out, opc);
	kvm_trap_emul_gva_lockless_end(vcpu);

	if (unlikely(err)) {
		/*
		 * Try to handle the fault, maybe we just raced with a GVA
		 * invalidation.
		 */
		err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc,
					      false);
		if (unlikely(err)) {
			kvm_err("%s: illegal address: %p\n",
				__func__, opc);
			return -EFAULT;
		}

		/* Hopefully it'll work now */
		goto retry;
	}
	return 0;
}