// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/sched/signal.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ras.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/virt.h>

#include "trace.h"
23
24static pgd_t *boot_hyp_pgd;
25static pgd_t *hyp_pgd;
26static pgd_t *merged_hyp_pgd;
27static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
28
29static unsigned long hyp_idmap_start;
30static unsigned long hyp_idmap_end;
31static phys_addr_t hyp_idmap_vector;
32
33static unsigned long io_map_base;
34
35#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
36
37#define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
38#define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
39
40static bool is_iomap(unsigned long flags)
41{
42 return flags & KVM_S2PTE_FLAG_IS_IOMAP;
43}
44
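/*
 * Return true if the memslot has dirty-page logging enabled, i.e. it has a
 * dirty bitmap and is not a read-only slot.
 */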
45static bool memslot_is_logging(struct kvm_memory_slot *memslot)
46{
47 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
48}
49
/**
 * kvm_flush_remote_tlbs() - flush all VM TLB entries
 * @kvm:	pointer to kvm structure.
 *
 * Interface to HYP function to flush all VM TLB entries
 */
56void kvm_flush_remote_tlbs(struct kvm *kvm)
57{
58 kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
59}
60
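/* Invalidate the stage-2 TLB entries covering @ipa for this VM's VMID. */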
61static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
62{
63 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
64}
65
/*
 * D-Cache management functions. They take the page table entries by
 * value, as they are flushing the cache using the kernel mapping (or
 * kmap on 32bit).
 */
71static void kvm_flush_dcache_pte(pte_t pte)
72{
73 __kvm_flush_dcache_pte(pte);
74}
75
76static void kvm_flush_dcache_pmd(pmd_t pmd)
77{
78 __kvm_flush_dcache_pmd(pmd);
79}
80
81static void kvm_flush_dcache_pud(pud_t pud)
82{
83 __kvm_flush_dcache_pud(pud);
84}
85
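/* PFNs without a struct page (!pfn_valid) are treated as device memory. */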
86static bool kvm_is_device_pfn(unsigned long pfn)
87{
88 return !pfn_valid(pfn);
89}
90
/**
 * stage2_dissolve_pmd() - clear and flush huge PMD entry
 * @kvm:	pointer to kvm structure.
 * @addr:	IPA
 * @pmd:	pmd pointer for IPA
 *
 * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs.
 */
99static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
100{
101 if (!pmd_thp_or_huge(*pmd))
102 return;
103
104 pmd_clear(pmd);
105 kvm_tlb_flush_vmid_ipa(kvm, addr);
106 put_page(virt_to_page(pmd));
107}
108
/**
 * stage2_dissolve_pud() - clear and flush huge PUD entry
 * @kvm:	pointer to kvm structure.
 * @addr:	IPA
 * @pudp:	pud pointer for IPA
 *
 * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs.
 */
117static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
118{
119 if (!stage2_pud_huge(kvm, *pudp))
120 return;
121
122 stage2_pud_clear(kvm, pudp);
123 kvm_tlb_flush_vmid_ipa(kvm, addr);
124 put_page(virt_to_page(pudp));
125}
126
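/*
 * Fill @cache with at least @min (and at most @max) free pages so that
 * stage-2 table allocations done while holding the mmu_lock (where we
 * cannot sleep) are guaranteed to succeed.
 */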
127static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
128 int min, int max)
129{
130 void *page;
131
132 BUG_ON(max > KVM_NR_MEM_OBJS);
133 if (cache->nobjs >= min)
134 return 0;
135 while (cache->nobjs < max) {
136 page = (void *)__get_free_page(GFP_PGTABLE_USER);
137 if (!page)
138 return -ENOMEM;
139 cache->objects[cache->nobjs++] = page;
140 }
141 return 0;
142}
143
144static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
145{
146 while (mc->nobjs)
147 free_page((unsigned long)mc->objects[--mc->nobjs]);
148}
149
150static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
151{
152 void *p;
153
154 BUG_ON(!mc || !mc->nobjs);
155 p = mc->objects[--mc->nobjs];
156 return p;
157}
158
159static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
160{
161 pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
162 stage2_pgd_clear(kvm, pgd);
163 kvm_tlb_flush_vmid_ipa(kvm, addr);
164 stage2_pud_free(kvm, pud_table);
165 put_page(virt_to_page(pgd));
166}
167
168static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
169{
170 pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
171 VM_BUG_ON(stage2_pud_huge(kvm, *pud));
172 stage2_pud_clear(kvm, pud);
173 kvm_tlb_flush_vmid_ipa(kvm, addr);
174 stage2_pmd_free(kvm, pmd_table);
175 put_page(virt_to_page(pud));
176}
177
178static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
179{
180 pte_t *pte_table = pte_offset_kernel(pmd, 0);
181 VM_BUG_ON(pmd_thp_or_huge(*pmd));
182 pmd_clear(pmd);
183 kvm_tlb_flush_vmid_ipa(kvm, addr);
184 free_page((unsigned long)pte_table);
185 put_page(virt_to_page(pmd));
186}
187
188static inline void kvm_set_pte(pte_t *ptep, pte_t new_pte)
189{
190 WRITE_ONCE(*ptep, new_pte);
191 dsb(ishst);
192}
193
194static inline void kvm_set_pmd(pmd_t *pmdp, pmd_t new_pmd)
195{
196 WRITE_ONCE(*pmdp, new_pmd);
197 dsb(ishst);
198}
199
200static inline void kvm_pmd_populate(pmd_t *pmdp, pte_t *ptep)
201{
202 kvm_set_pmd(pmdp, kvm_mk_pmd(ptep));
203}
204
205static inline void kvm_pud_populate(pud_t *pudp, pmd_t *pmdp)
206{
207 WRITE_ONCE(*pudp, kvm_mk_pud(pmdp));
208 dsb(ishst);
209}
210
211static inline void kvm_pgd_populate(pgd_t *pgdp, pud_t *pudp)
212{
213 WRITE_ONCE(*pgdp, kvm_mk_pgd(pudp));
214 dsb(ishst);
215}
216
/*
 * Unmapping vs dcache management:
 *
 * If a guest maps certain memory pages as uncached, all writes will
 * bypass the data cache and go directly to RAM.  However, the CPUs
 * can still speculate reads (not writes) and fill cache lines with
 * data.
 *
 * Those cache lines will be *clean* cache lines though, so a
 * clean+invalidate operation is equivalent to an invalidate
 * operation, because no cache lines are marked dirty.
 *
 * Those clean cache lines could be filled prior to an uncached write
 * by the guest, and the cache coherent IO subsystem would therefore
 * end up writing old data to disk.
 *
 * This is why right after unmapping a page/section and invalidating
 * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
 * we get the data back to RAM.
 */
241static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
242 phys_addr_t addr, phys_addr_t end)
243{
244 phys_addr_t start_addr = addr;
245 pte_t *pte, *start_pte;
246
247 start_pte = pte = pte_offset_kernel(pmd, addr);
248 do {
249 if (!pte_none(*pte)) {
250 pte_t old_pte = *pte;
251
252 kvm_set_pte(pte, __pte(0));
253 kvm_tlb_flush_vmid_ipa(kvm, addr);
254
255
256 if (!kvm_is_device_pfn(pte_pfn(old_pte)))
257 kvm_flush_dcache_pte(old_pte);
258
259 put_page(virt_to_page(pte));
260 }
261 } while (pte++, addr += PAGE_SIZE, addr != end);
262
263 if (stage2_pte_table_empty(kvm, start_pte))
264 clear_stage2_pmd_entry(kvm, pmd, start_addr);
265}
266
267static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
268 phys_addr_t addr, phys_addr_t end)
269{
270 phys_addr_t next, start_addr = addr;
271 pmd_t *pmd, *start_pmd;
272
273 start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
274 do {
275 next = stage2_pmd_addr_end(kvm, addr, end);
276 if (!pmd_none(*pmd)) {
277 if (pmd_thp_or_huge(*pmd)) {
278 pmd_t old_pmd = *pmd;
279
280 pmd_clear(pmd);
281 kvm_tlb_flush_vmid_ipa(kvm, addr);
282
283 kvm_flush_dcache_pmd(old_pmd);
284
285 put_page(virt_to_page(pmd));
286 } else {
287 unmap_stage2_ptes(kvm, pmd, addr, next);
288 }
289 }
290 } while (pmd++, addr = next, addr != end);
291
292 if (stage2_pmd_table_empty(kvm, start_pmd))
293 clear_stage2_pud_entry(kvm, pud, start_addr);
294}
295
296static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
297 phys_addr_t addr, phys_addr_t end)
298{
299 phys_addr_t next, start_addr = addr;
300 pud_t *pud, *start_pud;
301
302 start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
303 do {
304 next = stage2_pud_addr_end(kvm, addr, end);
305 if (!stage2_pud_none(kvm, *pud)) {
306 if (stage2_pud_huge(kvm, *pud)) {
307 pud_t old_pud = *pud;
308
309 stage2_pud_clear(kvm, pud);
310 kvm_tlb_flush_vmid_ipa(kvm, addr);
311 kvm_flush_dcache_pud(old_pud);
312 put_page(virt_to_page(pud));
313 } else {
314 unmap_stage2_pmds(kvm, pud, addr, next);
315 }
316 }
317 } while (pud++, addr = next, addr != end);
318
319 if (stage2_pud_table_empty(kvm, start_pud))
320 clear_stage2_pgd_entry(kvm, pgd, start_addr);
321}
322
/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
334static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
335{
336 pgd_t *pgd;
337 phys_addr_t addr = start, end = start + size;
338 phys_addr_t next;
339
340 assert_spin_locked(&kvm->mmu_lock);
341 WARN_ON(size & ~PAGE_MASK);
342
343 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
344 do {
		/*
		 * Make sure the page table is still active, as another thread
		 * could have possibly freed the page table, while we released
		 * the lock.
		 */
350 if (!READ_ONCE(kvm->arch.pgd))
351 break;
352 next = stage2_pgd_addr_end(kvm, addr, end);
353 if (!stage2_pgd_none(kvm, *pgd))
354 unmap_stage2_puds(kvm, pgd, addr, next);
		/*
		 * If the range is too large, release the kvm->mmu_lock
		 * to prevent starvation and lockup detector warnings.
		 */
359 if (next != end)
360 cond_resched_lock(&kvm->mmu_lock);
361 } while (pgd++, addr = next, addr != end);
362}
363
364static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
365 phys_addr_t addr, phys_addr_t end)
366{
367 pte_t *pte;
368
369 pte = pte_offset_kernel(pmd, addr);
370 do {
371 if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
372 kvm_flush_dcache_pte(*pte);
373 } while (pte++, addr += PAGE_SIZE, addr != end);
374}
375
376static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
377 phys_addr_t addr, phys_addr_t end)
378{
379 pmd_t *pmd;
380 phys_addr_t next;
381
382 pmd = stage2_pmd_offset(kvm, pud, addr);
383 do {
384 next = stage2_pmd_addr_end(kvm, addr, end);
385 if (!pmd_none(*pmd)) {
386 if (pmd_thp_or_huge(*pmd))
387 kvm_flush_dcache_pmd(*pmd);
388 else
389 stage2_flush_ptes(kvm, pmd, addr, next);
390 }
391 } while (pmd++, addr = next, addr != end);
392}
393
394static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
395 phys_addr_t addr, phys_addr_t end)
396{
397 pud_t *pud;
398 phys_addr_t next;
399
400 pud = stage2_pud_offset(kvm, pgd, addr);
401 do {
402 next = stage2_pud_addr_end(kvm, addr, end);
403 if (!stage2_pud_none(kvm, *pud)) {
404 if (stage2_pud_huge(kvm, *pud))
405 kvm_flush_dcache_pud(*pud);
406 else
407 stage2_flush_pmds(kvm, pud, addr, next);
408 }
409 } while (pud++, addr = next, addr != end);
410}
411
412static void stage2_flush_memslot(struct kvm *kvm,
413 struct kvm_memory_slot *memslot)
414{
415 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
416 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
417 phys_addr_t next;
418 pgd_t *pgd;
419
420 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
421 do {
422 next = stage2_pgd_addr_end(kvm, addr, end);
423 if (!stage2_pgd_none(kvm, *pgd))
424 stage2_flush_puds(kvm, pgd, addr, next);
425 } while (pgd++, addr = next, addr != end);
426}
427
/**
 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
 * @kvm: The struct kvm pointer
 *
 * Go through the stage 2 page tables and invalidate any cache lines
 * backing memory already mapped to the VM.
 */
435static void stage2_flush_vm(struct kvm *kvm)
436{
437 struct kvm_memslots *slots;
438 struct kvm_memory_slot *memslot;
439 int idx;
440
441 idx = srcu_read_lock(&kvm->srcu);
442 spin_lock(&kvm->mmu_lock);
443
444 slots = kvm_memslots(kvm);
445 kvm_for_each_memslot(memslot, slots)
446 stage2_flush_memslot(kvm, memslot);
447
448 spin_unlock(&kvm->mmu_lock);
449 srcu_read_unlock(&kvm->srcu, idx);
450}
451
452static void clear_hyp_pgd_entry(pgd_t *pgd)
453{
454 pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
455 pgd_clear(pgd);
456 pud_free(NULL, pud_table);
457 put_page(virt_to_page(pgd));
458}
459
460static void clear_hyp_pud_entry(pud_t *pud)
461{
462 pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
463 VM_BUG_ON(pud_huge(*pud));
464 pud_clear(pud);
465 pmd_free(NULL, pmd_table);
466 put_page(virt_to_page(pud));
467}
468
469static void clear_hyp_pmd_entry(pmd_t *pmd)
470{
471 pte_t *pte_table = pte_offset_kernel(pmd, 0);
472 VM_BUG_ON(pmd_thp_or_huge(*pmd));
473 pmd_clear(pmd);
474 pte_free_kernel(NULL, pte_table);
475 put_page(virt_to_page(pmd));
476}
477
478static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
479{
480 pte_t *pte, *start_pte;
481
482 start_pte = pte = pte_offset_kernel(pmd, addr);
483 do {
484 if (!pte_none(*pte)) {
485 kvm_set_pte(pte, __pte(0));
486 put_page(virt_to_page(pte));
487 }
488 } while (pte++, addr += PAGE_SIZE, addr != end);
489
490 if (hyp_pte_table_empty(start_pte))
491 clear_hyp_pmd_entry(pmd);
492}
493
494static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
495{
496 phys_addr_t next;
497 pmd_t *pmd, *start_pmd;
498
499 start_pmd = pmd = pmd_offset(pud, addr);
500 do {
501 next = pmd_addr_end(addr, end);
502
503 if (!pmd_none(*pmd))
504 unmap_hyp_ptes(pmd, addr, next);
505 } while (pmd++, addr = next, addr != end);
506
507 if (hyp_pmd_table_empty(start_pmd))
508 clear_hyp_pud_entry(pud);
509}
510
511static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
512{
513 phys_addr_t next;
514 pud_t *pud, *start_pud;
515
516 start_pud = pud = pud_offset(pgd, addr);
517 do {
518 next = pud_addr_end(addr, end);
519
520 if (!pud_none(*pud))
521 unmap_hyp_pmds(pud, addr, next);
522 } while (pud++, addr = next, addr != end);
523
524 if (hyp_pud_table_empty(start_pud))
525 clear_hyp_pgd_entry(pgd);
526}
527
528static unsigned int kvm_pgd_index(unsigned long addr, unsigned int ptrs_per_pgd)
529{
530 return (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1);
531}
532
533static void __unmap_hyp_range(pgd_t *pgdp, unsigned long ptrs_per_pgd,
534 phys_addr_t start, u64 size)
535{
536 pgd_t *pgd;
537 phys_addr_t addr = start, end = start + size;
538 phys_addr_t next;
539
540
541
542
543
544 pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
545 do {
546 next = pgd_addr_end(addr, end);
547 if (!pgd_none(*pgd))
548 unmap_hyp_puds(pgd, addr, next);
549 } while (pgd++, addr = next, addr != end);
550}
551
552static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
553{
554 __unmap_hyp_range(pgdp, PTRS_PER_PGD, start, size);
555}
556
557static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size)
558{
559 __unmap_hyp_range(pgdp, __kvm_idmap_ptrs_per_pgd(), start, size);
560}
561
/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the idmap/IO range.
 *
 * boot_hyp_pgd should only map the idmap range, and is only used when
 * the extended idmap is in use.
 */
572void free_hyp_pgds(void)
573{
574 pgd_t *id_pgd;
575
576 mutex_lock(&kvm_hyp_pgd_mutex);
577
578 id_pgd = boot_hyp_pgd ? boot_hyp_pgd : hyp_pgd;
579
580 if (id_pgd) {
581
582 if (!io_map_base)
583 io_map_base = hyp_idmap_start;
584 unmap_hyp_idmap_range(id_pgd, io_map_base,
585 hyp_idmap_start + PAGE_SIZE - io_map_base);
586 }
587
588 if (boot_hyp_pgd) {
589 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
590 boot_hyp_pgd = NULL;
591 }
592
593 if (hyp_pgd) {
594 unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
595 (uintptr_t)high_memory - PAGE_OFFSET);
596
597 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
598 hyp_pgd = NULL;
599 }
600 if (merged_hyp_pgd) {
601 clear_page(merged_hyp_pgd);
602 free_page((unsigned long)merged_hyp_pgd);
603 merged_hyp_pgd = NULL;
604 }
605
606 mutex_unlock(&kvm_hyp_pgd_mutex);
607}
608
609static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
610 unsigned long end, unsigned long pfn,
611 pgprot_t prot)
612{
613 pte_t *pte;
614 unsigned long addr;
615
616 addr = start;
617 do {
618 pte = pte_offset_kernel(pmd, addr);
619 kvm_set_pte(pte, kvm_pfn_pte(pfn, prot));
620 get_page(virt_to_page(pte));
621 pfn++;
622 } while (addr += PAGE_SIZE, addr != end);
623}
624
625static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
626 unsigned long end, unsigned long pfn,
627 pgprot_t prot)
628{
629 pmd_t *pmd;
630 pte_t *pte;
631 unsigned long addr, next;
632
633 addr = start;
634 do {
635 pmd = pmd_offset(pud, addr);
636
637 BUG_ON(pmd_sect(*pmd));
638
639 if (pmd_none(*pmd)) {
640 pte = pte_alloc_one_kernel(NULL);
641 if (!pte) {
642 kvm_err("Cannot allocate Hyp pte\n");
643 return -ENOMEM;
644 }
645 kvm_pmd_populate(pmd, pte);
646 get_page(virt_to_page(pmd));
647 }
648
649 next = pmd_addr_end(addr, end);
650
651 create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
652 pfn += (next - addr) >> PAGE_SHIFT;
653 } while (addr = next, addr != end);
654
655 return 0;
656}
657
658static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
659 unsigned long end, unsigned long pfn,
660 pgprot_t prot)
661{
662 pud_t *pud;
663 pmd_t *pmd;
664 unsigned long addr, next;
665 int ret;
666
667 addr = start;
668 do {
669 pud = pud_offset(pgd, addr);
670
671 if (pud_none_or_clear_bad(pud)) {
672 pmd = pmd_alloc_one(NULL, addr);
673 if (!pmd) {
674 kvm_err("Cannot allocate Hyp pmd\n");
675 return -ENOMEM;
676 }
677 kvm_pud_populate(pud, pmd);
678 get_page(virt_to_page(pud));
679 }
680
681 next = pud_addr_end(addr, end);
682 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
683 if (ret)
684 return ret;
685 pfn += (next - addr) >> PAGE_SHIFT;
686 } while (addr = next, addr != end);
687
688 return 0;
689}
690
691static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
692 unsigned long start, unsigned long end,
693 unsigned long pfn, pgprot_t prot)
694{
695 pgd_t *pgd;
696 pud_t *pud;
697 unsigned long addr, next;
698 int err = 0;
699
700 mutex_lock(&kvm_hyp_pgd_mutex);
701 addr = start & PAGE_MASK;
702 end = PAGE_ALIGN(end);
703 do {
704 pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
705
706 if (pgd_none(*pgd)) {
707 pud = pud_alloc_one(NULL, addr);
708 if (!pud) {
709 kvm_err("Cannot allocate Hyp pud\n");
710 err = -ENOMEM;
711 goto out;
712 }
713 kvm_pgd_populate(pgd, pud);
714 get_page(virt_to_page(pgd));
715 }
716
717 next = pgd_addr_end(addr, end);
718 err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
719 if (err)
720 goto out;
721 pfn += (next - addr) >> PAGE_SHIFT;
722 } while (addr = next, addr != end);
723out:
724 mutex_unlock(&kvm_hyp_pgd_mutex);
725 return err;
726}
727
728static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
729{
730 if (!is_vmalloc_addr(kaddr)) {
731 BUG_ON(!virt_addr_valid(kaddr));
732 return __pa(kaddr);
733 } else {
734 return page_to_phys(vmalloc_to_page(kaddr)) +
735 offset_in_page(kaddr);
736 }
737}
738
/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 * @prot:	The protection to be applied to this range
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo 4KB) to the same underlying physical
 * pages.
 */
749int create_hyp_mappings(void *from, void *to, pgprot_t prot)
750{
751 phys_addr_t phys_addr;
752 unsigned long virt_addr;
753 unsigned long start = kern_hyp_va((unsigned long)from);
754 unsigned long end = kern_hyp_va((unsigned long)to);
755
756 if (is_kernel_in_hyp_mode())
757 return 0;
758
759 start = start & PAGE_MASK;
760 end = PAGE_ALIGN(end);
761
762 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
763 int err;
764
765 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
766 err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
767 virt_addr, virt_addr + PAGE_SIZE,
768 __phys_to_pfn(phys_addr),
769 prot);
770 if (err)
771 return err;
772 }
773
774 return 0;
775}
776
777static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
778 unsigned long *haddr, pgprot_t prot)
779{
780 pgd_t *pgd = hyp_pgd;
781 unsigned long base;
782 int ret = 0;
783
784 mutex_lock(&kvm_hyp_pgd_mutex);
785
	/*
	 * This assumes that we have enough space below the idmap
	 * page to allocate our VAs. If not, the check below will
	 * kick. A potential alternative would be to detect that
	 * overflow and switch to an allocation above the idmap.
	 *
	 * The allocated size is always a multiple of PAGE_SIZE.
	 */
794 size = PAGE_ALIGN(size + offset_in_page(phys_addr));
795 base = io_map_base - size;
796
	/*
	 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
	 * allocating the new area, as it would indicate we've
	 * overflowed the idmap/IO address range.
	 */
802 if ((base ^ io_map_base) & BIT(VA_BITS - 1))
803 ret = -ENOMEM;
804 else
805 io_map_base = base;
806
807 mutex_unlock(&kvm_hyp_pgd_mutex);
808
809 if (ret)
810 goto out;
811
812 if (__kvm_cpu_uses_extended_idmap())
813 pgd = boot_hyp_pgd;
814
815 ret = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
816 base, base + size,
817 __phys_to_pfn(phys_addr), prot);
818 if (ret)
819 goto out;
820
821 *haddr = base + offset_in_page(phys_addr);
822
823out:
824 return ret;
825}
826
/**
 * create_hyp_io_mappings - Map IO into both kernel and HYP
 * @phys_addr:	The physical start address which gets mapped
 * @size:	Size of the region being mapped
 * @kaddr:	Kernel VA for this mapping
 * @haddr:	HYP VA for this mapping
 */
834int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
835 void __iomem **kaddr,
836 void __iomem **haddr)
837{
838 unsigned long addr;
839 int ret;
840
841 *kaddr = ioremap(phys_addr, size);
842 if (!*kaddr)
843 return -ENOMEM;
844
845 if (is_kernel_in_hyp_mode()) {
846 *haddr = *kaddr;
847 return 0;
848 }
849
850 ret = __create_hyp_private_mapping(phys_addr, size,
851 &addr, PAGE_HYP_DEVICE);
852 if (ret) {
853 iounmap(*kaddr);
854 *kaddr = NULL;
855 *haddr = NULL;
856 return ret;
857 }
858
859 *haddr = (void __iomem *)addr;
860 return 0;
861}
862
/**
 * create_hyp_exec_mappings - Map an executable range into HYP
 * @phys_addr:	The physical start address which gets mapped
 * @size:	Size of the region being mapped
 * @haddr:	HYP VA for this mapping
 */
869int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
870 void **haddr)
871{
872 unsigned long addr;
873 int ret;
874
875 BUG_ON(is_kernel_in_hyp_mode());
876
877 ret = __create_hyp_private_mapping(phys_addr, size,
878 &addr, PAGE_HYP_EXEC);
879 if (ret) {
880 *haddr = NULL;
881 return ret;
882 }
883
884 *haddr = (void *)addr;
885 return 0;
886}
887
/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates only the stage-2 HW PGD level table(s) of size defined by
 * stage2_pgd_size(kvm).
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
898int kvm_alloc_stage2_pgd(struct kvm *kvm)
899{
900 phys_addr_t pgd_phys;
901 pgd_t *pgd;
902
903 if (kvm->arch.pgd != NULL) {
904 kvm_err("kvm_arch already initialized?\n");
905 return -EINVAL;
906 }
907
908
909 pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
910 if (!pgd)
911 return -ENOMEM;
912
913 pgd_phys = virt_to_phys(pgd);
914 if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
915 return -EINVAL;
916
917 kvm->arch.pgd = pgd;
918 kvm->arch.pgd_phys = pgd_phys;
919 return 0;
920}
921
922static void stage2_unmap_memslot(struct kvm *kvm,
923 struct kvm_memory_slot *memslot)
924{
925 hva_t hva = memslot->userspace_addr;
926 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
927 phys_addr_t size = PAGE_SIZE * memslot->npages;
928 hva_t reg_end = hva + size;
929
	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we should
	 * unmap any of them.
	 */
942 do {
943 struct vm_area_struct *vma = find_vma(current->mm, hva);
944 hva_t vm_start, vm_end;
945
946 if (!vma || vma->vm_start >= reg_end)
947 break;
948
949
950
951
952 vm_start = max(hva, vma->vm_start);
953 vm_end = min(reg_end, vma->vm_end);
954
955 if (!(vma->vm_flags & VM_PFNMAP)) {
956 gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
957 unmap_stage2_range(kvm, gpa, vm_end - vm_start);
958 }
959 hva = vm_end;
960 } while (hva < reg_end);
961}
962
/**
 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
 * @kvm: The struct kvm pointer
 *
 * Go through the memregions and unmap any regular RAM
 * backing memory already mapped to the VM.
 */
970void stage2_unmap_vm(struct kvm *kvm)
971{
972 struct kvm_memslots *slots;
973 struct kvm_memory_slot *memslot;
974 int idx;
975
976 idx = srcu_read_lock(&kvm->srcu);
	down_read(&current->mm->mmap_sem);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_unmap_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	up_read(&current->mm->mmap_sem);
986 srcu_read_unlock(&kvm->srcu, idx);
987}
988
/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 */
997void kvm_free_stage2_pgd(struct kvm *kvm)
998{
999 void *pgd = NULL;
1000
1001 spin_lock(&kvm->mmu_lock);
1002 if (kvm->arch.pgd) {
1003 unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
1004 pgd = READ_ONCE(kvm->arch.pgd);
1005 kvm->arch.pgd = NULL;
1006 kvm->arch.pgd_phys = 0;
1007 }
1008 spin_unlock(&kvm->mmu_lock);
1009
1010
1011 if (pgd)
1012 free_pages_exact(pgd, stage2_pgd_size(kvm));
1013}
1014
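/*
 * Return a pointer to the stage-2 PUD entry for @addr, allocating a new PUD
 * table from @cache when the PGD entry is empty; returns NULL if an
 * allocation would be needed but no @cache was supplied.
 */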
1015static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1016 phys_addr_t addr)
1017{
1018 pgd_t *pgd;
1019 pud_t *pud;
1020
1021 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
1022 if (stage2_pgd_none(kvm, *pgd)) {
1023 if (!cache)
1024 return NULL;
1025 pud = mmu_memory_cache_alloc(cache);
1026 stage2_pgd_populate(kvm, pgd, pud);
1027 get_page(virt_to_page(pgd));
1028 }
1029
1030 return stage2_pud_offset(kvm, pgd, addr);
1031}
1032
1033static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1034 phys_addr_t addr)
1035{
1036 pud_t *pud;
1037 pmd_t *pmd;
1038
1039 pud = stage2_get_pud(kvm, cache, addr);
1040 if (!pud || stage2_pud_huge(kvm, *pud))
1041 return NULL;
1042
1043 if (stage2_pud_none(kvm, *pud)) {
1044 if (!cache)
1045 return NULL;
1046 pmd = mmu_memory_cache_alloc(cache);
1047 stage2_pud_populate(kvm, pud, pmd);
1048 get_page(virt_to_page(pud));
1049 }
1050
1051 return stage2_pmd_offset(kvm, pud, addr);
1052}
1053
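/*
 * Install a block mapping at PMD level, following the break-before-make
 * sequence (clear + TLB invalidate) when replacing an existing entry.
 */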
1054static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
1055 *cache, phys_addr_t addr, const pmd_t *new_pmd)
1056{
1057 pmd_t *pmd, old_pmd;
1058
1059retry:
1060 pmd = stage2_get_pmd(kvm, cache, addr);
1061 VM_BUG_ON(!pmd);
1062
1063 old_pmd = *pmd;
	/*
	 * Multiple vcpus faulting on the same PMD entry, can
	 * lead to them sequentially updating the PMD with the
	 * same value. Following the 'break-before-make'
	 * (pmd_clear() followed by tlb_flush()) process can
	 * hinder forward progress due to refaults generated
	 * on missing translations.
	 *
	 * Skip updating the page table if the entry is
	 * unchanged.
	 */
1075 if (pmd_val(old_pmd) == pmd_val(*new_pmd))
1076 return 0;
1077
1078 if (pmd_present(old_pmd)) {
		/*
		 * If we already have PTE level mapping for this block,
		 * we must unmap it to avoid inconsistent TLB state and
		 * leaking the table page. We could end up in this situation
		 * if the memory slot was marked for dirty logging and was
		 * reverted, leaving PTE level mappings for the pages accessed
		 * during the period. So, unmap the PTE level mapping for this
		 * block and retry, as we could have released the upper level
		 * table in the process.
		 *
		 * Normal THP split/merge follows mmu_notifier callbacks and do
		 * get handled accordingly.
		 */
1092 if (!pmd_thp_or_huge(old_pmd)) {
1093 unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE);
1094 goto retry;
1095 }
1096
		/*
		 * Mapping in huge pages should only happen through a
		 * fault.  If a page is merged into a transparent huge
		 * page, the individual subpages of that huge page
		 * should be unmapped through MMU notifiers before we
		 * get here.
		 *
		 * Merging of CompoundPages is not supported; they should
		 * be split first, unmapped, and mapped back in on-demand.
		 */
1107 WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
1108 pmd_clear(pmd);
1109 kvm_tlb_flush_vmid_ipa(kvm, addr);
1110 } else {
1111 get_page(virt_to_page(pmd));
1112 }
1113
1114 kvm_set_pmd(pmd, *new_pmd);
1115 return 0;
1116}
1117
1118static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1119 phys_addr_t addr, const pud_t *new_pudp)
1120{
1121 pud_t *pudp, old_pud;
1122
1123retry:
1124 pudp = stage2_get_pud(kvm, cache, addr);
1125 VM_BUG_ON(!pudp);
1126
1127 old_pud = *pudp;
1128
1129
1130
1131
1132
1133
1134 if (pud_val(old_pud) == pud_val(*new_pudp))
1135 return 0;
1136
1137 if (stage2_pud_present(kvm, old_pud)) {
1138
1139
1140
1141
1142 if (!stage2_pud_huge(kvm, old_pud)) {
1143 unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE);
1144 goto retry;
1145 }
1146
1147 WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
1148 stage2_pud_clear(kvm, pudp);
1149 kvm_tlb_flush_vmid_ipa(kvm, addr);
1150 } else {
1151 get_page(virt_to_page(pudp));
1152 }
1153
1154 kvm_set_pud(pudp, *new_pudp);
1155 return 0;
1156}
1157
/*
 * stage2_get_leaf_entry - walk the stage2 VM page tables and return
 * true if a valid and present leaf-entry is found. A pointer to the
 * leaf-entry is returned in the appropriate level variable - pudpp,
 * pmdpp, ptepp.
 */
1164static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
1165 pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp)
1166{
1167 pud_t *pudp;
1168 pmd_t *pmdp;
1169 pte_t *ptep;
1170
1171 *pudpp = NULL;
1172 *pmdpp = NULL;
1173 *ptepp = NULL;
1174
1175 pudp = stage2_get_pud(kvm, NULL, addr);
1176 if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp))
1177 return false;
1178
1179 if (stage2_pud_huge(kvm, *pudp)) {
1180 *pudpp = pudp;
1181 return true;
1182 }
1183
1184 pmdp = stage2_pmd_offset(kvm, pudp, addr);
1185 if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
1186 return false;
1187
1188 if (pmd_thp_or_huge(*pmdp)) {
1189 *pmdpp = pmdp;
1190 return true;
1191 }
1192
1193 ptep = pte_offset_kernel(pmdp, addr);
1194 if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
1195 return false;
1196
1197 *ptepp = ptep;
1198 return true;
1199}
1200
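/* Return true if the leaf entry mapping @addr has execute permissions. */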
1201static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
1202{
1203 pud_t *pudp;
1204 pmd_t *pmdp;
1205 pte_t *ptep;
1206 bool found;
1207
1208 found = stage2_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep);
1209 if (!found)
1210 return false;
1211
1212 if (pudp)
1213 return kvm_s2pud_exec(pudp);
1214 else if (pmdp)
1215 return kvm_s2pmd_exec(pmdp);
1216 else
1217 return kvm_s2pte_exec(ptep);
1218}
1219
1220static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1221 phys_addr_t addr, const pte_t *new_pte,
1222 unsigned long flags)
1223{
1224 pud_t *pud;
1225 pmd_t *pmd;
1226 pte_t *pte, old_pte;
1227 bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
1228 bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
1229
1230 VM_BUG_ON(logging_active && !cache);
1231
1232
1233 pud = stage2_get_pud(kvm, cache, addr);
1234 if (!pud) {
1235
1236
1237
1238
1239 return 0;
1240 }
1241
1242
1243
1244
1245
1246 if (logging_active)
1247 stage2_dissolve_pud(kvm, addr, pud);
1248
1249 if (stage2_pud_none(kvm, *pud)) {
1250 if (!cache)
1251 return 0;
1252 pmd = mmu_memory_cache_alloc(cache);
1253 stage2_pud_populate(kvm, pud, pmd);
1254 get_page(virt_to_page(pud));
1255 }
1256
1257 pmd = stage2_pmd_offset(kvm, pud, addr);
1258 if (!pmd) {
1259
1260
1261
1262
1263 return 0;
1264 }
1265
1266
1267
1268
1269
1270 if (logging_active)
1271 stage2_dissolve_pmd(kvm, addr, pmd);
1272
1273
1274 if (pmd_none(*pmd)) {
1275 if (!cache)
1276 return 0;
1277 pte = mmu_memory_cache_alloc(cache);
1278 kvm_pmd_populate(pmd, pte);
1279 get_page(virt_to_page(pmd));
1280 }
1281
1282 pte = pte_offset_kernel(pmd, addr);
1283
1284 if (iomap && pte_present(*pte))
1285 return -EFAULT;
1286
1287
1288 old_pte = *pte;
1289 if (pte_present(old_pte)) {
1290
1291 if (pte_val(old_pte) == pte_val(*new_pte))
1292 return 0;
1293
1294 kvm_set_pte(pte, __pte(0));
1295 kvm_tlb_flush_vmid_ipa(kvm, addr);
1296 } else {
1297 get_page(virt_to_page(pte));
1298 }
1299
1300 kvm_set_pte(pte, *new_pte);
1301 return 0;
1302}
1303
1304#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1305static int stage2_ptep_test_and_clear_young(pte_t *pte)
1306{
1307 if (pte_young(*pte)) {
1308 *pte = pte_mkold(*pte);
1309 return 1;
1310 }
1311 return 0;
1312}
1313#else
1314static int stage2_ptep_test_and_clear_young(pte_t *pte)
1315{
1316 return __ptep_test_and_clear_young(pte);
1317}
1318#endif
1319
1320static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
1321{
1322 return stage2_ptep_test_and_clear_young((pte_t *)pmd);
1323}
1324
1325static int stage2_pudp_test_and_clear_young(pud_t *pud)
1326{
1327 return stage2_ptep_test_and_clear_young((pte_t *)pud);
1328}
1329
/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 * @writable:	Whether or not to create a writable mapping
 */
1338int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
1339 phys_addr_t pa, unsigned long size, bool writable)
1340{
1341 phys_addr_t addr, end;
1342 int ret = 0;
1343 unsigned long pfn;
1344 struct kvm_mmu_memory_cache cache = { 0, };
1345
1346 end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
1347 pfn = __phys_to_pfn(pa);
1348
1349 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
1350 pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE);
1351
1352 if (writable)
1353 pte = kvm_s2pte_mkwrite(pte);
1354
1355 ret = mmu_topup_memory_cache(&cache,
1356 kvm_mmu_cache_min_pages(kvm),
1357 KVM_NR_MEM_OBJS);
1358 if (ret)
1359 goto out;
1360 spin_lock(&kvm->mmu_lock);
1361 ret = stage2_set_pte(kvm, &cache, addr, &pte,
1362 KVM_S2PTE_FLAG_IS_IOMAP);
1363 spin_unlock(&kvm->mmu_lock);
1364 if (ret)
1365 goto out;
1366
1367 pfn++;
1368 }
1369
1370out:
1371 mmu_free_memory_cache(&cache);
1372 return ret;
1373}
1374
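/*
 * If the pfn is backed by a transparent huge page, adjust *pfnp and *ipap so
 * that the fault can be mapped with a PMD-sized block covering the whole THP.
 */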
1375static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
1376{
1377 kvm_pfn_t pfn = *pfnp;
1378 gfn_t gfn = *ipap >> PAGE_SHIFT;
1379
1380 if (kvm_is_transparent_hugepage(pfn)) {
1381 unsigned long mask;
1382
		/*
		 * The address we faulted on is backed by a transparent huge
		 * page.  However, because we map the compound huge page and
		 * not the individual tail page, we need to transfer the
		 * refcount to the head page.  We have to be careful that the
		 * THP doesn't start to split while we are adjusting the
		 * refcounts.
		 *
		 * We are sure this doesn't happen, because mmu_notifier_retry
		 * was successful and we are holding the mmu_lock, so if this
		 * THP is trying to split, it will be blocked in the mmu
		 * notifier before touching any of the pages.
		 *
		 * We can therefore safely transfer the refcount from PG_tail
		 * to PG_head and switch the pfn from a tail page to the head
		 * page accordingly.
		 */
1400 mask = PTRS_PER_PMD - 1;
1401 VM_BUG_ON((gfn & mask) != (pfn & mask));
1402 if (pfn & mask) {
1403 *ipap &= PMD_MASK;
1404 kvm_release_pfn_clean(pfn);
1405 pfn &= ~mask;
1406 kvm_get_pfn(pfn);
1407 *pfnp = pfn;
1408 }
1409
1410 return true;
1411 }
1412
1413 return false;
1414}
1415
/**
 * stage2_wp_ptes - write protect PMD range
 * @pmd:	pointer to pmd entry
 * @addr:	range start address
 * @end:	range end address
 */
1422static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
1423{
1424 pte_t *pte;
1425
1426 pte = pte_offset_kernel(pmd, addr);
1427 do {
1428 if (!pte_none(*pte)) {
1429 if (!kvm_s2pte_readonly(pte))
1430 kvm_set_s2pte_readonly(pte);
1431 }
1432 } while (pte++, addr += PAGE_SIZE, addr != end);
1433}
1434
/**
 * stage2_wp_pmds - write protect PUD range
 * @kvm:	kvm instance for the VM
 * @pud:	pointer to pud entry
 * @addr:	range start address
 * @end:	range end address
 */
1442static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
1443 phys_addr_t addr, phys_addr_t end)
1444{
1445 pmd_t *pmd;
1446 phys_addr_t next;
1447
1448 pmd = stage2_pmd_offset(kvm, pud, addr);
1449
1450 do {
1451 next = stage2_pmd_addr_end(kvm, addr, end);
1452 if (!pmd_none(*pmd)) {
1453 if (pmd_thp_or_huge(*pmd)) {
1454 if (!kvm_s2pmd_readonly(pmd))
1455 kvm_set_s2pmd_readonly(pmd);
1456 } else {
1457 stage2_wp_ptes(pmd, addr, next);
1458 }
1459 }
1460 } while (pmd++, addr = next, addr != end);
1461}
1462
/**
 * stage2_wp_puds - write protect PGD range
 * @kvm:	kvm instance for the VM
 * @pgd:	pointer to pgd entry
 * @addr:	range start address
 * @end:	range end address
 */
1469static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
1470 phys_addr_t addr, phys_addr_t end)
1471{
1472 pud_t *pud;
1473 phys_addr_t next;
1474
1475 pud = stage2_pud_offset(kvm, pgd, addr);
1476 do {
1477 next = stage2_pud_addr_end(kvm, addr, end);
1478 if (!stage2_pud_none(kvm, *pud)) {
1479 if (stage2_pud_huge(kvm, *pud)) {
1480 if (!kvm_s2pud_readonly(pud))
1481 kvm_set_s2pud_readonly(pud);
1482 } else {
1483 stage2_wp_pmds(kvm, pud, addr, next);
1484 }
1485 }
1486 } while (pud++, addr = next, addr != end);
1487}
1488
/**
 * stage2_wp_range() - write protect stage2 memory region range
 * @kvm:	The KVM pointer
 * @addr:	Start address of range
 * @end:	End address of range
 */
1495static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
1496{
1497 pgd_t *pgd;
1498 phys_addr_t next;
1499
1500 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
1501 do {
		/*
		 * Release kvm_mmu_lock periodically if the memory region is
		 * large. Otherwise, we may see kernel panics with
		 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
		 * CONFIG_LOCKDEP. Additionally, holding the lock too long
		 * will also starve other vCPUs. We have to also make sure
		 * that the page tables are not freed while we released
		 * the lock.
		 */
1511 cond_resched_lock(&kvm->mmu_lock);
1512 if (!READ_ONCE(kvm->arch.pgd))
1513 break;
1514 next = stage2_pgd_addr_end(kvm, addr, end);
1515 if (stage2_pgd_present(kvm, *pgd))
1516 stage2_wp_puds(kvm, pgd, addr, next);
1517 } while (pgd++, addr = next, addr != end);
1518}
1519
/**
 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
 * @kvm:	The KVM pointer
 * @slot:	The memory slot to write protect
 *
 * Called to start logging dirty pages after memory region
 * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
 * all present PUD, PMD and PTEs are write protected in the memory region.
 * Afterwards read of dirty page log can be called.
 *
 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
 * serializing operations for VM memory regions.
 */
1533void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1534{
1535 struct kvm_memslots *slots = kvm_memslots(kvm);
1536 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
1537 phys_addr_t start, end;
1538
1539 if (WARN_ON_ONCE(!memslot))
1540 return;
1541
1542 start = memslot->base_gfn << PAGE_SHIFT;
1543 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1544
1545 spin_lock(&kvm->mmu_lock);
1546 stage2_wp_range(kvm, start, end);
1547 spin_unlock(&kvm->mmu_lock);
1548 kvm_flush_remote_tlbs(kvm);
1549}
1550
/**
 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks bits set in mask write protects the associated pte's. Caller must
 * acquire kvm_mmu_lock.
 */
1562static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1563 struct kvm_memory_slot *slot,
1564 gfn_t gfn_offset, unsigned long mask)
1565{
1566 phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1567 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
1568 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
1569
1570 stage2_wp_range(kvm, start, end);
1571}
1572
/*
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * dirty pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 */
1580void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1581 struct kvm_memory_slot *slot,
1582 gfn_t gfn_offset, unsigned long mask)
1583{
1584 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1585}
1586
1587static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
1588{
1589 __clean_dcache_guest_page(pfn, size);
1590}
1591
1592static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
1593{
1594 __invalidate_icache_guest_page(pfn, size);
1595}
1596
1597static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
1598{
1599 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
1600}
1601
1602static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
1603 unsigned long hva,
1604 unsigned long map_size)
1605{
1606 gpa_t gpa_start;
1607 hva_t uaddr_start, uaddr_end;
1608 size_t size;
1609
1610 size = memslot->npages * PAGE_SIZE;
1611
1612 gpa_start = memslot->base_gfn << PAGE_SHIFT;
1613
1614 uaddr_start = memslot->userspace_addr;
1615 uaddr_end = uaddr_start + size;
1616
	/*
	 * Pages belonging to memslots that don't have the same alignment
	 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
	 * PMD/PUD entries, because we'll end up mapping the wrong pages.
	 *
	 * If the userspace address and the IPA of the start of the memslot
	 * differ in their offset within a block of size @map_size, a block
	 * mapping at stage-2 would associate guest pages with the wrong host
	 * pages, so such faults must fall back to page mappings.
	 */
1640 if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
1641 return false;
1642
	/*
	 * Next, let's make sure we're not trying to map anything not covered
	 * by the memslot. This means we have to prohibit block size mappings
	 * which would end up being mapped to a block which is not fully backed
	 * by the memslot: both the start and the end of the block containing
	 * @hva must fall within [uaddr_start, uaddr_end).
	 */
1655 return (hva & ~(map_size - 1)) >= uaddr_start &&
1656 (hva & ~(map_size - 1)) + map_size <= uaddr_end;
1657}
1658
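/*
 * Handle a stage-2 fault on memory backed by a memslot: pin the page with
 * gfn_to_pfn_prot(), then map it at PUD, PMD or PTE level depending on the
 * backing VMA and whether dirty logging forces page-sized mappings.
 */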
1659static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1660 struct kvm_memory_slot *memslot, unsigned long hva,
1661 unsigned long fault_status)
1662{
1663 int ret;
1664 bool write_fault, writable, force_pte = false;
1665 bool exec_fault, needs_exec;
1666 unsigned long mmu_seq;
1667 gfn_t gfn = fault_ipa >> PAGE_SHIFT;
1668 struct kvm *kvm = vcpu->kvm;
1669 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
1670 struct vm_area_struct *vma;
1671 short vma_shift;
1672 kvm_pfn_t pfn;
1673 pgprot_t mem_type = PAGE_S2;
1674 bool logging_active = memslot_is_logging(memslot);
1675 unsigned long vma_pagesize, flags = 0;
1676
1677 write_fault = kvm_is_write_fault(vcpu);
1678 exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
1679 VM_BUG_ON(write_fault && exec_fault);
1680
1681 if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
1682 kvm_err("Unexpected L2 read permission error\n");
1683 return -EFAULT;
1684 }
1685
1686
	down_read(&current->mm->mmap_sem);
	vma = find_vma_intersection(current->mm, hva, hva + 1);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		up_read(&current->mm->mmap_sem);
1692 return -EFAULT;
1693 }
1694
1695 if (is_vm_hugetlb_page(vma))
1696 vma_shift = huge_page_shift(hstate_vma(vma));
1697 else
1698 vma_shift = PAGE_SHIFT;
1699
1700 vma_pagesize = 1ULL << vma_shift;
1701 if (logging_active ||
1702 (vma->vm_flags & VM_PFNMAP) ||
1703 !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
1704 force_pte = true;
1705 vma_pagesize = PAGE_SIZE;
1706 }
1707
	/*
	 * The stage2 has a minimum of 2 level table (For arm64 see
	 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
	 * use PMD_SIZE huge mappings (even when the PMD is folded into PGD).
	 * As for PUD huge maps, we must make sure that we have at least
	 * 3 levels, i.e, PMD is not folded.
	 */
1715 if (vma_pagesize == PMD_SIZE ||
1716 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
1717 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
	up_read(&current->mm->mmap_sem);
1719
1720
1721 ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
1722 KVM_NR_MEM_OBJS);
1723 if (ret)
1724 return ret;
1725
1726 mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to gets unmapped before we have a
	 * chance to grab the mmu_lock, which ensure that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
1736 smp_rmb();
1737
1738 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
1739 if (pfn == KVM_PFN_ERR_HWPOISON) {
1740 kvm_send_hwpoison_signal(hva, vma_shift);
1741 return 0;
1742 }
1743 if (is_error_noslot_pfn(pfn))
1744 return -EFAULT;
1745
1746 if (kvm_is_device_pfn(pfn)) {
1747 mem_type = PAGE_S2_DEVICE;
1748 flags |= KVM_S2PTE_FLAG_IS_IOMAP;
1749 } else if (logging_active) {
		/*
		 * Faults on pages in a memslot with logging enabled
		 * should not be mapped with huge pages (it introduces churn
		 * and performance degradation), so force a pte mapping.
		 */
1755 flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
1756
		/*
		 * Only actually map the page as writable if this was a write
		 * fault.
		 */
1761 if (!write_fault)
1762 writable = false;
1763 }
1764
1765 if (exec_fault && is_iomap(flags))
1766 return -ENOEXEC;
1767
1768 spin_lock(&kvm->mmu_lock);
1769 if (mmu_notifier_retry(kvm, mmu_seq))
1770 goto out_unlock;
1771
1772 if (vma_pagesize == PAGE_SIZE && !force_pte) {
		/*
		 * Only PMD_SIZE transparent hugepages(THP) are
		 * currently supported. This code will need to be
		 * updated to support other THP sizes.
		 *
		 * Make sure the host VA and the guest IPA are sufficiently
		 * aligned and that the block is contained within the memslot.
		 */
1781 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
1782 transparent_hugepage_adjust(&pfn, &fault_ipa))
1783 vma_pagesize = PMD_SIZE;
1784 }
1785
1786 if (writable)
1787 kvm_set_pfn_dirty(pfn);
1788
1789 if (fault_status != FSC_PERM && !is_iomap(flags))
1790 clean_dcache_guest_page(pfn, vma_pagesize);
1791
1792 if (exec_fault)
1793 invalidate_icache_guest_page(pfn, vma_pagesize);
1794
	/*
	 * If we took an execution fault we have made the
	 * icache/dcache coherent above and should now let the s2
	 * mapping be executable.
	 *
	 * Write faults (!exec_fault && FSC_PERM) are orthogonal to
	 * execute permissions, and we preserve whatever we have.
	 */
1803 needs_exec = exec_fault ||
1804 (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
1805
1806 if (vma_pagesize == PUD_SIZE) {
1807 pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
1808
1809 new_pud = kvm_pud_mkhuge(new_pud);
1810 if (writable)
1811 new_pud = kvm_s2pud_mkwrite(new_pud);
1812
1813 if (needs_exec)
1814 new_pud = kvm_s2pud_mkexec(new_pud);
1815
1816 ret = stage2_set_pud_huge(kvm, memcache, fault_ipa, &new_pud);
1817 } else if (vma_pagesize == PMD_SIZE) {
1818 pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
1819
1820 new_pmd = kvm_pmd_mkhuge(new_pmd);
1821
1822 if (writable)
1823 new_pmd = kvm_s2pmd_mkwrite(new_pmd);
1824
1825 if (needs_exec)
1826 new_pmd = kvm_s2pmd_mkexec(new_pmd);
1827
1828 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
1829 } else {
1830 pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
1831
1832 if (writable) {
1833 new_pte = kvm_s2pte_mkwrite(new_pte);
1834 mark_page_dirty(kvm, gfn);
1835 }
1836
1837 if (needs_exec)
1838 new_pte = kvm_s2pte_mkexec(new_pte);
1839
1840 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
1841 }
1842
1843out_unlock:
1844 spin_unlock(&kvm->mmu_lock);
1845 kvm_set_pfn_accessed(pfn);
1846 kvm_release_pfn_clean(pfn);
1847 return ret;
1848}
1849
/*
 * Resolve the access fault by making the page young again.
 * Note that because the faulting entry is guaranteed not to be
 * cached in the TLB, we don't need to invalidate anything.
 * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
 * so there is no need for atomic (pte|pmd)_mkyoung operations.
 */
1857static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
1858{
1859 pud_t *pud;
1860 pmd_t *pmd;
1861 pte_t *pte;
1862 kvm_pfn_t pfn;
1863 bool pfn_valid = false;
1864
1865 trace_kvm_access_fault(fault_ipa);
1866
1867 spin_lock(&vcpu->kvm->mmu_lock);
1868
1869 if (!stage2_get_leaf_entry(vcpu->kvm, fault_ipa, &pud, &pmd, &pte))
1870 goto out;
1871
1872 if (pud) {
1873 *pud = kvm_s2pud_mkyoung(*pud);
1874 pfn = kvm_pud_pfn(*pud);
1875 pfn_valid = true;
1876 } else if (pmd) {
1877 *pmd = pmd_mkyoung(*pmd);
1878 pfn = pmd_pfn(*pmd);
1879 pfn_valid = true;
1880 } else {
1881 *pte = pte_mkyoung(*pte);
1882 pfn = pte_pfn(*pte);
1883 pfn_valid = true;
1884 }
1885
1886out:
1887 spin_unlock(&vcpu->kvm->mmu_lock);
1888 if (pfn_valid)
1889 kvm_set_pfn_accessed(pfn);
1890}
1891
/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean that either the
 * guest simply needs more memory and we must allocate an appropriate page or it
 * can mean that the guest tried to access I/O memory, which is emulated by user
 * space. The distinction is based on the IPA causing the fault and whether this
 * memory region has been registered as standard RAM by user space.
 */
1904int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
1905{
1906 unsigned long fault_status;
1907 phys_addr_t fault_ipa;
1908 struct kvm_memory_slot *memslot;
1909 unsigned long hva;
1910 bool is_iabt, write_fault, writable;
1911 gfn_t gfn;
1912 int ret, idx;
1913
1914 fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
1915
1916 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
1917 is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
1918
	/* Synchronous External Abort? */
1920 if (kvm_vcpu_dabt_isextabt(vcpu)) {
		/*
		 * For RAS the host kernel may handle this abort.
		 * There is no need to pass the error into the guest.
		 */
1925 if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
1926 return 1;
1927
1928 if (unlikely(!is_iabt)) {
1929 kvm_inject_vabt(vcpu);
1930 return 1;
1931 }
1932 }
1933
1934 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
1935 kvm_vcpu_get_hfar(vcpu), fault_ipa);
1936
1937
1938 if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
1939 fault_status != FSC_ACCESS) {
1940 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1941 kvm_vcpu_trap_get_class(vcpu),
1942 (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
1943 (unsigned long)kvm_vcpu_get_hsr(vcpu));
1944 return -EFAULT;
1945 }
1946
1947 idx = srcu_read_lock(&vcpu->kvm->srcu);
1948
1949 gfn = fault_ipa >> PAGE_SHIFT;
1950 memslot = gfn_to_memslot(vcpu->kvm, gfn);
1951 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1952 write_fault = kvm_is_write_fault(vcpu);
1953 if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
1954 if (is_iabt) {
1955
1956 ret = -ENOEXEC;
1957 goto out;
1958 }
		/*
		 * Check for a cache maintenance operation. Since we
		 * ended-up here, we know it is outside of any memory
		 * slot. But we can't find out if that is for a device,
		 * or if the guest is just being paranoid. The only thing
		 * we know for sure is that this range cannot be cached.
		 *
		 * So let's assume that the guest is just being
		 * cautious, and skip the instruction.
		 */
1970 if (kvm_vcpu_dabt_is_cm(vcpu)) {
1971 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1972 ret = 1;
1973 goto out_unlock;
1974 }
1975
		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
1982 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
1983 ret = io_mem_abort(vcpu, run, fault_ipa);
1984 goto out_unlock;
1985 }
1986
1987
1988 VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
1989
1990 if (fault_status == FSC_ACCESS) {
1991 handle_access_fault(vcpu, fault_ipa);
1992 ret = 1;
1993 goto out_unlock;
1994 }
1995
1996 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
1997 if (ret == 0)
1998 ret = 1;
1999out:
2000 if (ret == -ENOEXEC) {
2001 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
2002 ret = 1;
2003 }
2004out_unlock:
2005 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2006 return ret;
2007}
2008
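/*
 * Apply @handler to the guest physical ranges backed by every memslot that
 * intersects the host virtual address range [start, end).
 */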
2009static int handle_hva_to_gpa(struct kvm *kvm,
2010 unsigned long start,
2011 unsigned long end,
2012 int (*handler)(struct kvm *kvm,
2013 gpa_t gpa, u64 size,
2014 void *data),
2015 void *data)
2016{
2017 struct kvm_memslots *slots;
2018 struct kvm_memory_slot *memslot;
2019 int ret = 0;
2020
2021 slots = kvm_memslots(kvm);
2022
2023
2024 kvm_for_each_memslot(memslot, slots) {
2025 unsigned long hva_start, hva_end;
2026 gfn_t gpa;
2027
2028 hva_start = max(start, memslot->userspace_addr);
2029 hva_end = min(end, memslot->userspace_addr +
2030 (memslot->npages << PAGE_SHIFT));
2031 if (hva_start >= hva_end)
2032 continue;
2033
2034 gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
2035 ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
2036 }
2037
2038 return ret;
2039}
2040
2041static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2042{
2043 unmap_stage2_range(kvm, gpa, size);
2044 return 0;
2045}
2046
2047int kvm_unmap_hva_range(struct kvm *kvm,
2048 unsigned long start, unsigned long end)
2049{
2050 if (!kvm->arch.pgd)
2051 return 0;
2052
2053 trace_kvm_unmap_hva_range(start, end);
2054 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
2055 return 0;
2056}
2057
2058static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2059{
2060 pte_t *pte = (pte_t *)data;
2061
2062 WARN_ON(size != PAGE_SIZE);
2063
	/*
	 * We can always call stage2_set_pte with KVM_S2_FLAG_LOGGING_ACTIVE
	 * flag clear because MMU notifiers will have unmapped a huge PMD before
	 * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
	 * therefore stage2_set_pte() never needs to clear out a huge PMD
	 * through this calling path.
	 */
2070 stage2_set_pte(kvm, NULL, gpa, pte, 0);
2071 return 0;
2072}
2073
2074
2075int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
2076{
2077 unsigned long end = hva + PAGE_SIZE;
2078 kvm_pfn_t pfn = pte_pfn(pte);
2079 pte_t stage2_pte;
2080
2081 if (!kvm->arch.pgd)
2082 return 0;
2083
2084 trace_kvm_set_spte_hva(hva);
2085
	/*
	 * We've moved a page around, probably through CoW, so let's treat it
	 * just like a translation fault and clean the cache to the PoC.
	 */
2090 clean_dcache_guest_page(pfn, PAGE_SIZE);
2091 stage2_pte = kvm_pfn_pte(pfn, PAGE_S2);
2092 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
2093
2094 return 0;
2095}
2096
2097static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2098{
2099 pud_t *pud;
2100 pmd_t *pmd;
2101 pte_t *pte;
2102
2103 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
2104 if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
2105 return 0;
2106
2107 if (pud)
2108 return stage2_pudp_test_and_clear_young(pud);
2109 else if (pmd)
2110 return stage2_pmdp_test_and_clear_young(pmd);
2111 else
2112 return stage2_ptep_test_and_clear_young(pte);
2113}
2114
2115static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2116{
2117 pud_t *pud;
2118 pmd_t *pmd;
2119 pte_t *pte;
2120
2121 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
2122 if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
2123 return 0;
2124
2125 if (pud)
2126 return kvm_s2pud_young(*pud);
2127 else if (pmd)
2128 return pmd_young(*pmd);
2129 else
2130 return pte_young(*pte);
2131}
2132
2133int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
2134{
2135 if (!kvm->arch.pgd)
2136 return 0;
2137 trace_kvm_age_hva(start, end);
2138 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
2139}
2140
2141int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
2142{
2143 if (!kvm->arch.pgd)
2144 return 0;
2145 trace_kvm_test_age_hva(hva);
2146 return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
2147 kvm_test_age_hva_handler, NULL);
2148}
2149
2150void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
2151{
2152 mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
2153}
2154
2155phys_addr_t kvm_mmu_get_httbr(void)
2156{
2157 if (__kvm_cpu_uses_extended_idmap())
2158 return virt_to_phys(merged_hyp_pgd);
2159 else
2160 return virt_to_phys(hyp_pgd);
2161}
2162
2163phys_addr_t kvm_get_idmap_vector(void)
2164{
2165 return hyp_idmap_vector;
2166}
2167
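/* Map the HYP init/idmap text with executable permissions into @pgd. */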
2168static int kvm_map_idmap_text(pgd_t *pgd)
2169{
2170 int err;
2171
2172
2173 err = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
2174 hyp_idmap_start, hyp_idmap_end,
2175 __phys_to_pfn(hyp_idmap_start),
2176 PAGE_HYP_EXEC);
2177 if (err)
2178 kvm_err("Failed to idmap %lx-%lx\n",
2179 hyp_idmap_start, hyp_idmap_end);
2180
2181 return err;
2182}
2183
2184int kvm_mmu_init(void)
2185{
2186 int err;
2187
2188 hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
2189 hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
2190 hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
2191 hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
2192 hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
2193
	/*
	 * We rely on the linker script to ensure at build time that the HYP
	 * init code does not cross a page boundary.
	 */
2198 BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
2199
2200 kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
2201 kvm_debug("HYP VA range: %lx:%lx\n",
2202 kern_hyp_va(PAGE_OFFSET),
2203 kern_hyp_va((unsigned long)high_memory - 1));
2204
2205 if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
2206 hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) &&
2207 hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
		/*
		 * The idmap page is intersecting with the VA space,
		 * it is not safe to continue further.
		 */
2212 kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
2213 err = -EINVAL;
2214 goto out;
2215 }
2216
2217 hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
2218 if (!hyp_pgd) {
2219 kvm_err("Hyp mode PGD not allocated\n");
2220 err = -ENOMEM;
2221 goto out;
2222 }
2223
2224 if (__kvm_cpu_uses_extended_idmap()) {
2225 boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2226 hyp_pgd_order);
2227 if (!boot_hyp_pgd) {
2228 kvm_err("Hyp boot PGD not allocated\n");
2229 err = -ENOMEM;
2230 goto out;
2231 }
2232
2233 err = kvm_map_idmap_text(boot_hyp_pgd);
2234 if (err)
2235 goto out;
2236
2237 merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
2238 if (!merged_hyp_pgd) {
2239 kvm_err("Failed to allocate extra HYP pgd\n");
2240 goto out;
2241 }
2242 __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
2243 hyp_idmap_start);
2244 } else {
2245 err = kvm_map_idmap_text(hyp_pgd);
2246 if (err)
2247 goto out;
2248 }
2249
2250 io_map_base = hyp_idmap_start;
2251 return 0;
2252out:
2253 free_hyp_pgds();
2254 return err;
2255}
2256
2257void kvm_arch_commit_memory_region(struct kvm *kvm,
2258 const struct kvm_userspace_memory_region *mem,
2259 struct kvm_memory_slot *old,
2260 const struct kvm_memory_slot *new,
2261 enum kvm_mr_change change)
2262{
	/*
	 * At this point memslot has been committed and there is an
	 * allocated dirty_bitmap[], dirty pages will be tracked while the
	 * memory slot is write protected.
	 */
2268 if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
2269 kvm_mmu_wp_memory_region(kvm, mem->slot);
2270}
2271
2272int kvm_arch_prepare_memory_region(struct kvm *kvm,
2273 struct kvm_memory_slot *memslot,
2274 const struct kvm_userspace_memory_region *mem,
2275 enum kvm_mr_change change)
2276{
2277 hva_t hva = mem->userspace_addr;
2278 hva_t reg_end = hva + mem->memory_size;
2279 bool writable = !(mem->flags & KVM_MEM_READONLY);
2280 int ret = 0;
2281
2282 if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
2283 change != KVM_MR_FLAGS_ONLY)
2284 return 0;
2285
	/*
	 * Prevent userspace from creating a memory region outside of the IPA
	 * space addressable by the KVM guest IPA space.
	 */
2290 if (memslot->base_gfn + memslot->npages >=
2291 (kvm_phys_size(kvm) >> PAGE_SHIFT))
2292 return -EFAULT;
2293
	down_read(&current->mm->mmap_sem);
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307 do {
2308 struct vm_area_struct *vma = find_vma(current->mm, hva);
2309 hva_t vm_start, vm_end;
2310
2311 if (!vma || vma->vm_start >= reg_end)
2312 break;
2313
2314
2315
2316
2317 vm_start = max(hva, vma->vm_start);
2318 vm_end = min(reg_end, vma->vm_end);
2319
2320 if (vma->vm_flags & VM_PFNMAP) {
2321 gpa_t gpa = mem->guest_phys_addr +
2322 (vm_start - mem->userspace_addr);
2323 phys_addr_t pa;
2324
2325 pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
2326 pa += vm_start - vma->vm_start;
2327
2328
2329 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
2330 ret = -EINVAL;
2331 goto out;
2332 }
2333
2334 ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
2335 vm_end - vm_start,
2336 writable);
2337 if (ret)
2338 break;
2339 }
2340 hva = vm_end;
2341 } while (hva < reg_end);
2342
2343 if (change == KVM_MR_FLAGS_ONLY)
2344 goto out;
2345
2346 spin_lock(&kvm->mmu_lock);
2347 if (ret)
2348 unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
2349 else
2350 stage2_flush_memslot(kvm, memslot);
2351 spin_unlock(&kvm->mmu_lock);
2352out:
	up_read(&current->mm->mmap_sem);
2354 return ret;
2355}
2356
2357void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
2358{
2359}
2360
2361void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
2362{
2363}
2364
2365void kvm_arch_flush_shadow_all(struct kvm *kvm)
2366{
2367 kvm_free_stage2_pgd(kvm);
2368}
2369
2370void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
2371 struct kvm_memory_slot *slot)
2372{
2373 gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
2374 phys_addr_t size = slot->npages << PAGE_SHIFT;
2375
2376 spin_lock(&kvm->mmu_lock);
2377 unmap_stage2_range(kvm, gpa, size);
2378 spin_unlock(&kvm->mmu_lock);
2379}
2380
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 *
 * Main problems:
 * - S/W ops are local to a CPU (not broadcast)
 * - We have line migration behind our back (speculation)
 * - System caches don't support S/W at all (damn!)
 *
 * In the face of the above, the best we can do is to try and convert
 * S/W ops to VA ops. Because the guest is not allowed to infer the
 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
 * which is a rather good thing for us.
 *
 * Also, it is only used when turning caches on/off ("The expected
 * usage of the cache maintenance instructions that operate by set/way
 * is associated with the cache maintenance instructions associated
 * with the powerdown and powerup of caches, if this is required by
 * the implementation.").
 *
 * We use the following policy:
 *
 * - If we trap a S/W operation, we enable VM trapping to detect
 *   caches being turned on/off, and do a full clean.
 *
 * - We flush the caches on both caches being turned on and off.
 *
 * - Once the caches are enabled, we stop trapping VM ops.
 */
2409void kvm_set_way_flush(struct kvm_vcpu *vcpu)
2410{
2411 unsigned long hcr = *vcpu_hcr(vcpu);
2412
	/*
	 * If this is the first time we do a S/W operation
	 * (i.e. HCR_TVM not set) flush the whole memory, and set the
	 * VM trapping.
	 *
	 * Otherwise, rely on the VM trapping to wait for the MMU +
	 * Caches to be turned off. At that point, we'll be able to
	 * clean the caches again.
	 */
2422 if (!(hcr & HCR_TVM)) {
2423 trace_kvm_set_way_flush(*vcpu_pc(vcpu),
2424 vcpu_has_cache_enabled(vcpu));
2425 stage2_flush_vm(vcpu->kvm);
2426 *vcpu_hcr(vcpu) = hcr | HCR_TVM;
2427 }
2428}
2429
2430void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
2431{
2432 bool now_enabled = vcpu_has_cache_enabled(vcpu);
2433
	/*
	 * If switching the MMU+caches on, need to invalidate the caches.
	 * If switching it off, need to clean the caches.
	 * Clean + invalidate does the trick always.
	 */
2439 if (now_enabled != was_enabled)
2440 stage2_flush_vm(vcpu->kvm);
2441
	/* Caches are now on, stop trapping VM ops (until a S/W op) */
2443 if (now_enabled)
2444 *vcpu_hcr(vcpu) &= ~HCR_TVM;
2445
2446 trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
2447}
2448