#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * With a non-VHE kernel, the HYP code at EL2 only has the TTBR0_EL2
 * translation regime at its disposal, so it cannot reuse kernel virtual
 * addresses directly: every kernel VA that HYP needs to access must first
 * be converted into a HYP VA.
 *
 * The conversion is performed by kern_hyp_va() (and __kern_hyp_va() in C
 * code) below. The instruction sequence used there (mask, rotate, insert
 * an offset in two halves, rotate back) is only a placeholder: it is
 * rewritten at boot time by kvm_update_va_mask(), which installs the
 * actual mask and tag used to build HYP VAs for the running kernel.
 */
#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 *
 * reg: VA to be converted.
 *
 * The instructions below only reserve space and fix the register
 * allocation; the real sequence is generated at boot time by
 * kvm_update_va_mask().
 */
.macro kern_hyp_va	reg
alternative_cb kvm_update_va_mask
	and	\reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm

#else

#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);

static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))

/*
 * Obtain the address of a kernel symbol using a PC-relative adrp/add pair
 * rather than by loading an absolute address, so that the result is valid
 * in whatever address space the code runs in, in particular at HYP where
 * absolute kernel VAs are not mapped.
 */
#define hyp_symbol_addr(s)						\
	({								\
		typeof(s) *addr;					\
		asm("adrp	%0, %1\n"				\
		    "add	%0, %0, :lo12:%1\n"			\
		    : "=r" (addr) : "S" (&s));				\
		addr;							\
	})

/*
 * Stage 2 translation currently uses a 40-bit IPA space.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
	pte_val(pte) &= ~PTE_S2_XN;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_S2_XN;
	return pmd;
}

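/*
 * Downgrade a stage 2 entry to read-only with a cmpxchg() loop, so that
 * concurrent updates to the other bits of the entry are not lost.
 */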
static inline void kvm_set_s2pte_readonly(pte_t *ptep)
{
	pteval_t old_pteval, pteval;

	pteval = READ_ONCE(pte_val(*ptep));
	do {
		old_pteval = pteval;
		pteval &= ~PTE_S2_RDWR;
		pteval |= PTE_S2_RDONLY;
		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
	} while (pteval != old_pteval);
}

static inline bool kvm_s2pte_readonly(pte_t *ptep)
{
	return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_exec(pte_t *ptep)
{
	return !(READ_ONCE(pte_val(*ptep)) & PTE_S2_XN);
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp)
{
	kvm_set_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmdp)
{
	return kvm_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
{
	return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
}

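/*
 * The KVM MMU code takes a page reference for every entry it installs in a
 * page-table page, so a page count of 1 (the allocation reference) means
 * the table holds no entries.
 */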
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

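/*
 * Both SCTLR_EL1.M (bit 0, MMU enable) and SCTLR_EL1.C (bit 2, data cache
 * enable) must be set for the guest to be considered as running with its
 * caches on.
 */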
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	if (icache_is_aliasing()) {
		/* Aliasing icache: invalidate it entirely. */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT, or VPIPT when running at EL2: invalidate by VA. */
		void *va = page_address(pfn_to_page(pfn));

		invalidate_icache_range((unsigned long)va,
					(unsigned long)va + size);
	}
}

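/*
 * Clean and invalidate to the PoC the memory covered by a stage 2 page or
 * block entry, through its kernel linear-map alias.
 */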
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__pa_symbol(x)

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap_level();
}

static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
{
	return idmap_ptrs_per_pgd;
}

/*
 * Build the merged PGD used when an extended idmap is needed: entry 0
 * points at the runtime HYP page tables, and the entry covering the idmap
 * address points at the boot (idmap) page tables.
 */
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;
	u64 pgd_addr;

	/*
	 * Use the first entry for the runtime HYP mappings; it must be
	 * free, otherwise an extended idmap would not be in use.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	pgd_addr = __phys_to_pgd_val(__pa(hyp_pgd));
	merged_hyp_pgd[0] = __pgd(pgd_addr | PMD_TYPE_TABLE);

	/*
	 * Point the entry covering the idmap address at the boot HYP page
	 * tables, which contain the identity mapping of the HYP init code.
	 * This effectively merges the boot and runtime HYP tables under a
	 * single PGD.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	pgd_addr = __phys_to_pgd_val(__pa(boot_hyp_pgd));
	merged_hyp_pgd[idmap_idx] = __pgd(pgd_addr | PMD_TYPE_TABLE);
}

/*
 * ID_AA64MMFR1_EL1.VMIDBits == 0b0010 indicates support for 16-bit VMIDs;
 * otherwise only 8-bit VMIDs are available.
 */
static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

/*
 * The caller is not expected to hold kvm->srcu, so take the SRCU read lock
 * around the guest access. The data is copied out under the lock, which
 * can therefore be dropped again before returning.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#ifdef CONFIG_KVM_INDIRECT_VECTORS
/*
 * EL2 vectors can be remapped and redirected in several ways, depending on
 * which mitigations the CPU requires:
 *
 * - A CPU with ARM64_HARDEN_BRANCH_PREDICTOR runs its vectors from a
 *   per-CPU slot of __bp_harden_hyp_vecs_start, selected through its
 *   bp_hardening_data.
 *
 * - A non-VHE CPU with ARM64_HARDEN_EL2_VECTORS additionally accesses the
 *   vectors through the dedicated executable mapping set up by
 *   kvm_map_vectors() (__kvm_bp_vect_base), reusing its BP hardening slot
 *   if it has one, or the private __kvm_harden_el2_vector_slot otherwise.
 */
#include <asm/mmu.h>

extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;

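/*
 * Select the HYP vector base this CPU should use, taking the hardening
 * slots described above into account.
 */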
static inline void *kvm_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
	int slot = -1;

	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start));
		slot = data->hyp_vectors_slot;
	}

	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
		vect = __kvm_bp_vect_base;
		if (slot == -1)
			slot = __kvm_harden_el2_vector_slot;
	}

	if (slot != -1)
		vect += slot * SZ_2K;

	return vect;
}

static inline int kvm_map_vectors(void)
{
	/*
	 * If branch predictor hardening is in use, the hardened vectors are
	 * already part of the HYP mappings; record their HYP VA so that
	 * kvm_get_hyp_vector() can find them.
	 */
	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
	}

	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs_start);
		unsigned long size = (__bp_harden_hyp_vecs_end -
				      __bp_harden_hyp_vecs_start);

		/*
		 * Always reserve a private vector slot, as we don't know yet
		 * which CPUs will have a BP hardening slot that can be
		 * reused.
		 */
		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
		return create_hyp_exec_mappings(vect_pa, size,
						&__kvm_bp_vect_base);
	}

	return 0;
}
#else
static inline void *kvm_get_hyp_vector(void)
{
	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
}

static inline int kvm_map_vectors(void)
{
	return 0;
}
#endif /* CONFIG_KVM_INDIRECT_VECTORS */

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */