/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
        /*
         * This function also gets called when dealing with HYP page
         * tables. As HYP doesn't have an associated struct kvm (and the
         * HYP page tables are fairly static), we don't do anything there.
         */
        if (kvm)
                kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

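/*
 * These helpers manage a small cache of pre-allocated page-table pages, so
 * that the stage-2 fault path can populate missing table levels while
 * holding the MMU spinlock, without having to allocate memory there.
 */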
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  int min, int max)
{
        void *page;

        BUG_ON(max > KVM_NR_MEM_OBJS);
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < max) {
                page = (void *)__get_free_page(PGALLOC_GFP);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
        }
        return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        void *p;

        BUG_ON(!mc || !mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}

static bool page_empty(void *ptr)
{
        struct page *ptr_page = virt_to_page(ptr);
        return page_count(ptr_page) == 1;
}

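/*
 * The clear_*_entry() helpers tear down one page table entry: the entry is
 * cleared, the TLB entry for that address is invalidated, any lower-level
 * table it pointed to is freed, and the reference count on the page holding
 * the entry is dropped.
 */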
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
        pmd_t *pmd_table = pmd_offset(pud, 0);
        pud_clear(pud);
        kvm_tlb_flush_vmid_ipa(kvm, addr);
        pmd_free(NULL, pmd_table);
        put_page(virt_to_page(pud));
}

static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
        pte_t *pte_table = pte_offset_kernel(pmd, 0);
        pmd_clear(pmd);
        kvm_tlb_flush_vmid_ipa(kvm, addr);
        pte_free_kernel(NULL, pte_table);
        put_page(virt_to_page(pmd));
}

static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
        if (pte_present(*pte)) {
                kvm_set_pte(pte, __pte(0));
                put_page(virt_to_page(pte));
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        }
}

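/*
 * Walk the page tables rooted at pgdp and unmap [start, start + size),
 * clearing each mapped PTE and freeing intermediate tables once their pages
 * become empty. Used both for Hyp mappings (kvm == NULL) and for stage-2
 * guest mappings.
 */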
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
                        unsigned long long start, u64 size)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long long addr = start, end = start + size;
        u64 next;

        while (addr < end) {
                pgd = pgdp + pgd_index(addr);
                pud = pud_offset(pgd, addr);
                if (pud_none(*pud)) {
                        addr = pud_addr_end(addr, end);
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        addr = pmd_addr_end(addr, end);
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                clear_pte_entry(kvm, pte, addr);
                next = addr + PAGE_SIZE;

                /* If we emptied the pte, walk back up the ladder */
                if (page_empty(pte)) {
                        clear_pmd_entry(kvm, pmd, addr);
                        next = pmd_addr_end(addr, end);
                        if (page_empty(pmd) && !page_empty(pud)) {
                                clear_pud_entry(kvm, pud, addr);
                                next = pud_addr_end(addr, end);
                        }
                }

                addr = next;
        }
}

/**
 * free_boot_hyp_pgd - free HYP boot page tables
 *
 * Free the HYP boot page tables. The bounce page is also freed.
 */
void free_boot_hyp_pgd(void)
{
        mutex_lock(&kvm_hyp_pgd_mutex);

        if (boot_hyp_pgd) {
                unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
                unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
                kfree(boot_hyp_pgd);
                boot_hyp_pgd = NULL;
        }

        if (hyp_pgd)
                unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

        kfree(init_bounce_page);
        init_bounce_page = NULL;

        mutex_unlock(&kvm_hyp_pgd_mutex);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
        unsigned long addr;

        free_boot_hyp_pgd();

        mutex_lock(&kvm_hyp_pgd_mutex);

        if (hyp_pgd) {
                for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
                        unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
                for (addr = VMALLOC_START; is_vmalloc_addr((void *)addr); addr += PGDIR_SIZE)
                        unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

                kfree(hyp_pgd);
                hyp_pgd = NULL;
        }

        mutex_unlock(&kvm_hyp_pgd_mutex);
}

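/*
 * The create_hyp_*_mappings() helpers below fill in the PTE and PMD levels
 * of a Hyp page table for a given VA range. Each new entry takes a reference
 * on the page that contains it (so unmap_range() can tell when a table page
 * becomes empty) and is cleaned to the point of coherency.
 */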
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
                                    unsigned long end, unsigned long pfn,
                                    pgprot_t prot)
{
        pte_t *pte;
        unsigned long addr;

        addr = start;
        do {
                pte = pte_offset_kernel(pmd, addr);
                kvm_set_pte(pte, pfn_pte(pfn, prot));
                get_page(virt_to_page(pte));
                kvm_flush_dcache_to_poc(pte, sizeof(*pte));
                pfn++;
        } while (addr += PAGE_SIZE, addr != end);
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
                                   unsigned long end, unsigned long pfn,
                                   pgprot_t prot)
{
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, next;

        addr = start;
        do {
                pmd = pmd_offset(pud, addr);

                BUG_ON(pmd_sect(*pmd));

                if (pmd_none(*pmd)) {
                        pte = pte_alloc_one_kernel(NULL, addr);
                        if (!pte) {
                                kvm_err("Cannot allocate Hyp pte\n");
                                return -ENOMEM;
                        }
                        pmd_populate_kernel(NULL, pmd, pte);
                        get_page(virt_to_page(pmd));
                        kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
                }

                next = pmd_addr_end(addr, end);

                create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
                pfn += (next - addr) >> PAGE_SHIFT;
        } while (addr = next, addr != end);

        return 0;
}

static int __create_hyp_mappings(pgd_t *pgdp,
                                 unsigned long start, unsigned long end,
                                 unsigned long pfn, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr, next;
        int err = 0;

        mutex_lock(&kvm_hyp_pgd_mutex);
        addr = start & PAGE_MASK;
        end = PAGE_ALIGN(end);
        do {
                pgd = pgdp + pgd_index(addr);
                pud = pud_offset(pgd, addr);

                if (pud_none_or_clear_bad(pud)) {
                        pmd = pmd_alloc_one(NULL, addr);
                        if (!pmd) {
                                kvm_err("Cannot allocate Hyp pmd\n");
                                err = -ENOMEM;
                                goto out;
                        }
                        pud_populate(NULL, pud, pmd);
                        get_page(virt_to_page(pud));
                        kvm_flush_dcache_to_poc(pud, sizeof(*pud));
                }

                next = pgd_addr_end(addr, end);
                err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
                if (err)
                        goto out;
                pfn += (next - addr) >> PAGE_SHIFT;
        } while (addr = next, addr != end);
out:
        mutex_unlock(&kvm_hyp_pgd_mutex);
        return err;
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used in
 * Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
        unsigned long phys_addr = virt_to_phys(from);
        unsigned long start = KERN_TO_HYP((unsigned long)from);
        unsigned long end = KERN_TO_HYP((unsigned long)to);

        /* Check for a valid kernel address range */
        if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
                return -EINVAL;

        return __create_hyp_mappings(hyp_pgd, start, end,
                                     __phys_to_pfn(phys_addr), PAGE_HYP);
}

/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:	The kernel start VA of the range
 * @to:		The kernel end VA of the range (exclusive)
 * @phys_addr:	The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
        unsigned long start = KERN_TO_HYP((unsigned long)from);
        unsigned long end = KERN_TO_HYP((unsigned long)to);

        /* Check for a valid kernel IO mapping */
        if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
                return -EINVAL;

        return __create_hyp_mappings(hyp_pgd, start, end,
                                     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
 * support either full 40-bit input addresses or limited to 32-bit input
 * addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
        pgd_t *pgd;

        if (kvm->arch.pgd != NULL) {
                kvm_err("kvm_arch already initialized?\n");
                return -EINVAL;
        }

        pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
        if (!pgd)
                return -ENOMEM;

        memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
        kvm_clean_pgd(pgd);
        kvm->arch.pgd = pgd;

        return 0;
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * things up behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
        unmap_range(kvm, kvm->arch.pgd, start, size);
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1
 * table and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
        if (kvm->arch.pgd == NULL)
                return;

        unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
        free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
        kvm->arch.pgd = NULL;
}

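/*
 * Install *new_pte in the stage-2 page tables at IPA addr, allocating any
 * missing intermediate tables from the cache. When called without a cache
 * (e.g. from the MMU notifier path) missing levels are simply skipped.
 * I/O mappings (iomap) refuse to overwrite an existing entry.
 */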
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                          phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, old_pte;

        /* Create 2nd stage page table mapping - Level 1 */
        pgd = kvm->arch.pgd + pgd_index(addr);
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pmd = mmu_memory_cache_alloc(cache);
                pud_populate(NULL, pud, pmd);
                get_page(virt_to_page(pud));
        }

        pmd = pmd_offset(pud, addr);

        /* Create 2nd stage page table mapping - Level 2 */
        if (pmd_none(*pmd)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pte = mmu_memory_cache_alloc(cache);
                kvm_clean_pte(pte);
                pmd_populate_kernel(NULL, pmd, pte);
                get_page(virt_to_page(pmd));
        }

        pte = pte_offset_kernel(pmd, addr);

        if (iomap && pte_present(*pte))
                return -EFAULT;

        /* Create 2nd stage page table mapping - Level 3 */
        old_pte = *pte;
        kvm_set_pte(pte, *new_pte);
        if (pte_present(old_pte))
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        else
                get_page(virt_to_page(pte));

        return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size)
{
        phys_addr_t addr, end;
        int ret = 0;
        unsigned long pfn;
        struct kvm_mmu_memory_cache cache = { 0, };

        end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
        pfn = __phys_to_pfn(pa);

        for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
                pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
                kvm_set_s2pte_writable(&pte);

                ret = mmu_topup_memory_cache(&cache, 2, 2);
                if (ret)
                        goto out;
                spin_lock(&kvm->mmu_lock);
                ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
                        goto out;

                pfn++;
        }

out:
        mmu_free_memory_cache(&cache);
        return ret;
}

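/*
 * Resolve a stage-2 fault on memory that is backed by a memslot: pin the
 * host page with gfn_to_pfn_prot(), then install a stage-2 PTE for it under
 * mmu_lock, making the mapping writable when the host page could be mapped
 * writable.
 */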
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          gfn_t gfn, struct kvm_memory_slot *memslot,
                          unsigned long fault_status)
{
        pte_t new_pte;
        pfn_t pfn;
        int ret;
        bool write_fault, writable;
        unsigned long mmu_seq;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

        write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
        if (fault_status == FSC_PERM && !write_fault) {
                kvm_err("Unexpected L2 read permission error\n");
                return -EFAULT;
        }

        /* We need minimum second+third level pages */
        ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
        if (ret)
                return ret;

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        /*
         * Ensure the read of mmu_notifier_seq happens before we call
         * gfn_to_pfn_prot (which calls get_user_pages), so that we don't
         * risk the page we just got a reference to getting unmapped before
         * we have a chance to grab the mmu_lock, which ensures that if the
         * page gets unmapped afterwards, the call to kvm_unmap_hva will take
         * it away from us again properly. This smp_rmb() interacts with the
         * smp_wmb() in kvm_mmu_notifier_invalidate_<page|range_end>.
         */
        smp_rmb();

        pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
        if (is_error_pfn(pfn))
                return -EFAULT;

        new_pte = pfn_pte(pfn, PAGE_S2);
        coherent_icache_guest_page(vcpu->kvm, gfn);

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
                goto out_unlock;
        if (writable) {
                kvm_set_s2pte_writable(&new_pte);
                kvm_set_pfn_dirty(pfn);
        }
        stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);

out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return 0;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean that either
 * the guest simply needs more memory and we must allocate an appropriate
 * page or it can mean that the guest tried to access I/O memory, which is
 * emulated by user space. The distinction is based on the IPA causing the
 * fault and whether this memory region has been registered as standard RAM
 * by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        unsigned long fault_status;
        phys_addr_t fault_ipa;
        struct kvm_memory_slot *memslot;
        bool is_iabt;
        gfn_t gfn;
        int ret, idx;

        is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
        fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

        trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
                              kvm_vcpu_get_hfar(vcpu), fault_ipa);

        /* Check the stage-2 fault is trans. fault or write fault */
        fault_status = kvm_vcpu_trap_get_fault(vcpu);
        if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
                kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
                        kvm_vcpu_trap_get_class(vcpu), fault_status);
                return -EFAULT;
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        gfn = fault_ipa >> PAGE_SHIFT;
        if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                if (is_iabt) {
                        /* Prefetch Abort on I/O address */
                        kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
                        ret = 1;
                        goto out_unlock;
                }

                if (fault_status != FSC_FAULT) {
                        kvm_err("Unsupported fault status on io memory: %#lx\n",
                                fault_status);
                        ret = -EFAULT;
                        goto out_unlock;
                }

                /*
                 * The IPA is reported as [MAX:12], so we need to
                 * complement it with the bottom 12 bits from the
                 * faulting VA. This is always 12 bits, irrespective
                 * of the page size.
                 */
                fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
                ret = io_mem_abort(vcpu, run, fault_ipa);
                goto out_unlock;
        }

        memslot = gfn_to_memslot(vcpu->kvm, gfn);

        ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
        if (ret == 0)
                ret = 1;
out_unlock:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        return ret;
}

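/*
 * Iterate over all memslots that intersect the HVA range [start, end) and
 * invoke the handler on every guest-physical page backed by that range.
 * This is the common plumbing for the MMU notifier callbacks below.
 */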
static void handle_hva_to_gpa(struct kvm *kvm,
                              unsigned long start,
                              unsigned long end,
                              void (*handler)(struct kvm *kvm,
                                              gpa_t gpa, void *data),
                              void *data)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);

        /* we only care about the pages that the guest sees */
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                              (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;

                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                for (; gfn < gfn_end; ++gfn) {
                        gpa_t gpa = gfn << PAGE_SHIFT;
                        handler(kvm, gpa, data);
                }
        }
}

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        unmap_stage2_range(kvm, gpa, PAGE_SIZE);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        unsigned long end = hva + PAGE_SIZE;

        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva(hva);
        handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end)
{
        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva_range(start, end);
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        pte_t *pte = (pte_t *)data;

        stage2_set_pte(kvm, NULL, gpa, pte, false);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        unsigned long end = hva + PAGE_SIZE;
        pte_t stage2_pte;

        if (!kvm->arch.pgd)
                return;

        trace_kvm_set_spte_hva(hva);
        stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
        handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

phys_addr_t kvm_mmu_get_httbr(void)
{
        return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_mmu_get_boot_httbr(void)
{
        return virt_to_phys(boot_hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
        return hyp_idmap_vector;
}

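/*
 * Set up the boot and runtime Hyp page tables: an identity mapping of the
 * Hyp init code (via a bounce page if the code crosses a page boundary) and
 * the trampoline page mapped at TRAMPOLINE_VA in both sets of tables.
 */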
int kvm_mmu_init(void)
{
        int err;

        hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
        hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
        hyp_idmap_vector = virt_to_phys(__kvm_hyp_init);

        if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
                /*
                 * Our init code is crossing a page boundary. Allocate
                 * a bounce page, copy the code over and use that.
                 */
                size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
                phys_addr_t phys_base;

                init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
                if (!init_bounce_page) {
                        kvm_err("Couldn't allocate HYP init bounce page\n");
                        err = -ENOMEM;
                        goto out;
                }

                memcpy(init_bounce_page, __hyp_idmap_text_start, len);
                /*
                 * Warning: the code we just copied to the bounce page
                 * must be flushed to the point of coherency.
                 * Otherwise, the data may be sitting in L2, and HYP
                 * mode won't be able to observe it as it runs with
                 * caches off at that point.
                 */
                kvm_flush_dcache_to_poc(init_bounce_page, len);

                phys_base = virt_to_phys(init_bounce_page);
                hyp_idmap_vector += phys_base - hyp_idmap_start;
                hyp_idmap_start = phys_base;
                hyp_idmap_end = phys_base + len;

                kvm_info("Using HYP init bounce page @%lx\n",
                         (unsigned long)phys_base);
        }

        hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
        boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
        if (!hyp_pgd || !boot_hyp_pgd) {
                kvm_err("Hyp mode PGD not allocated\n");
                err = -ENOMEM;
                goto out;
        }

        /* Create the idmap in the boot page tables */
        err = __create_hyp_mappings(boot_hyp_pgd,
                                    hyp_idmap_start, hyp_idmap_end,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP);

        if (err) {
                kvm_err("Failed to idmap %lx-%lx\n",
                        hyp_idmap_start, hyp_idmap_end);
                goto out;
        }

        /* Map the very same page at the trampoline VA */
        err = __create_hyp_mappings(boot_hyp_pgd,
                                    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP);
        if (err) {
                kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
                        TRAMPOLINE_VA);
                goto out;
        }

        /* Map the same page again into the runtime page tables */
        err = __create_hyp_mappings(hyp_pgd,
                                    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP);
        if (err) {
                kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
                        TRAMPOLINE_VA);
                goto out;
        }

        return 0;
out:
        free_hyp_pgds();
        return err;
}