#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>

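/*
 * As we only have the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 */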
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)

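/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */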
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)

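/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels in addition to the PGD (and potentially the PUD) which are
 * pre-allocated.
 */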
#ifdef CONFIG_ARM64_64K_PAGES
#define KVM_MMU_CACHE_MIN_PAGES	1
#else
#define KVM_MMU_CACHE_MIN_PAGES	2
#endif

#ifdef __ASSEMBLY__

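/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */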
.macro kern_hyp_va	reg
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
.endm

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)

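/*
 * We currently only support a 40bit IPA.
 */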
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

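/*
 * Page table updates are visible to the arm64 hardware walker without
 * explicit cache maintenance, so the kvm_clean_* helpers are no-ops.
 */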
static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

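/* Set or query the read/write permission bits of stage-2 entries. */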
static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= PMD_S2_RDWR;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
}

#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)

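/*
 * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, we can address
 * the entire IPA input range with a single pgd entry, and we would only need
 * one pgd entry.  Note that in this case, the pgd is actually not used by
 * the MMU for Stage-2 translations, but is merely a fake pgd used as a data
 * structure for the kernel pgtable macros to work.
 */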
#if PGDIR_SHIFT > KVM_PHYS_SHIFT
#define PTRS_PER_S2_PGD_SHIFT	0
#else
#define PTRS_PER_S2_PGD_SHIFT	(KVM_PHYS_SHIFT - PGDIR_SHIFT)
#endif
#define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)
#define S2_PGD_ORDER		get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))

#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))

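/*
 * If we are concatenating first level stage-2 page tables, we would have less
 * than or equal to 16 pointers in the fake PGD, because that's what the
 * architecture allows.  In this case, (4 - CONFIG_PGTABLE_LEVELS)
 * represents the first level for the host, and we add 1 to pick the level
 * of the entries that must be pre-allocated to stand in for the fake PGD.
 */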
#if PTRS_PER_S2_PGD <= 16
#define KVM_PREALLOC_LEVEL	(4 - CONFIG_PGTABLE_LEVELS + 1)
#else
#define KVM_PREALLOC_LEVEL	(0)
#endif

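/*
 * Return the address of the table at which the hardware stage-2 walk
 * actually starts, skipping any levels that KVM pre-allocated above it.
 */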
static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	pgd_t *pgd = kvm->arch.pgd;
	pud_t *pud;

	if (KVM_PREALLOC_LEVEL == 0)
		return pgd;

	pud = pud_offset(pgd, 0);
	if (KVM_PREALLOC_LEVEL == 1)
		return pud;

	BUG_ON(KVM_PREALLOC_LEVEL != 2);
	return pmd_offset(pud, 0);
}

static inline unsigned int kvm_get_hwpgd_size(void)
{
	if (KVM_PREALLOC_LEVEL > 0)
		return PTRS_PER_S2_PGD * PAGE_SIZE;
	return PTRS_PER_S2_PGD * sizeof(pgd_t);
}

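/*
 * A stage-2 table page is empty when nothing points out of it: each live
 * entry holds a reference on the page, so an empty table is left with
 * only its initial allocation reference.
 */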
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define kvm_pmd_table_empty(kvm, pmdp) (0)
#else
#define kvm_pmd_table_empty(kvm, pmdp) \
	(kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define kvm_pud_table_empty(kvm, pudp) (0)
#else
#define kvm_pud_table_empty(kvm, pudp) \
	(kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

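/* Both the MMU (SCTLR_EL1.M) and the D-cache (SCTLR_EL1.C) must be on. */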
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

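	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */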
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

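	/*
	 * Create another extended level entry that points to the boot HYP
	 * map, which contains an ID mapping of the HYP init code. We
	 * essentially merge the boot and runtime HYP maps by doing so, but
	 * they don't overlap anyway, so this is fine.
	 */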
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */