/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, the hypervisor cannot
 * express "negative" addresses, and thus cannot directly reuse the
 * kernel's TTBR1-based VA layout at EL2. Instead, a kernel VA is
 * converted into a HYP VA by masking off its top bits and inserting a
 * (potentially random) tag, which keeps the HYP mappings in the lower
 * half of the EL2 address space, clear of the idmap.
 *
 * The mask and tag cannot be known at compile time, as they depend on
 * VA_BITS and on the randomization performed at boot:
 * kvm_compute_layout() chooses them, and kvm_update_va_mask() patches
 * the stub instruction sequences below through the alternatives
 * framework to apply them.
 */

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va	reg
alternative_cb kvm_update_va_mask
	and	\reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm
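
/*
 * Illustrative only (hypothetical call site, not part of this header):
 * assembly code holding a kernel VA in x0 would convert it in place
 * before using it at EL2 with:
 *
 *	kern_hyp_va	x0
 */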

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
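
/*
 * Illustrative only (hypothetical caller): a kernel pointer must be
 * converted to its HYP alias before being dereferenced at EL2, e.g.:
 *
 *	struct kvm *hyp_kvm = kern_hyp_va(kvm);
 */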

/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))
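
/*
 * For example, with the default 40-bit IPA space:
 *	kvm_phys_size(kvm) == 1ULL << 40	(1 TiB)
 *	kvm_phys_mask(kvm) == 0xffffffffff
 */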

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);

	/* Only the initial allocation reference is left */
	return page_count(ptr_page) == 1;
}

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_mk_pmd(ptep)					\
	__pmd(__phys_to_pmd_val(__pa(ptep)) | PMD_TYPE_TABLE)
#define kvm_mk_pud(pmdp)					\
	__pud(__phys_to_pud_val(__pa(pmdp)) | PMD_TYPE_TABLE)
#define kvm_mk_p4d(pmdp)					\
	__p4d(__phys_to_p4d_val(__pa(pmdp)) | PUD_TYPE_TABLE)

#define kvm_set_pud(pudp, pud)		set_pud(pudp, pud)

#define kvm_pfn_pte(pfn, prot)		pfn_pte(pfn, prot)
#define kvm_pfn_pmd(pfn, prot)		pfn_pmd(pfn, prot)
#define kvm_pfn_pud(pfn, prot)		pfn_pud(pfn, prot)

#define kvm_pud_pfn(pud)		pud_pfn(pud)

#define kvm_pmd_mkhuge(pmd)		pmd_mkhuge(pmd)
#define kvm_pud_mkhuge(pud)		pud_mkhuge(pud)

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline pud_t kvm_s2pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= PUD_S2_RDWR;
	return pud;
}

static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
	pte_val(pte) &= ~PTE_S2_XN;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_S2_XN;
	return pmd;
}

static inline pud_t kvm_s2pud_mkexec(pud_t pud)
{
	pud_val(pud) &= ~PUD_S2_XN;
	return pud;
}

/*
 * Clear the write permission without losing concurrent hardware updates
 * of the access/dirty flags: a plain store could race with the MMU
 * setting those bits, hence the cmpxchg loop.
 */
static inline void kvm_set_s2pte_readonly(pte_t *ptep)
{
	pteval_t old_pteval, pteval;

	pteval = READ_ONCE(pte_val(*ptep));
	do {
		old_pteval = pteval;
		pteval &= ~PTE_S2_RDWR;
		pteval |= PTE_S2_RDONLY;
		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
	} while (pteval != old_pteval);
}

static inline bool kvm_s2pte_readonly(pte_t *ptep)
{
	return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_exec(pte_t *ptep)
{
	return !(READ_ONCE(pte_val(*ptep)) & PTE_S2_XN);
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp)
{
	kvm_set_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmdp)
{
	return kvm_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
{
	return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
}

static inline void kvm_set_s2pud_readonly(pud_t *pudp)
{
	kvm_set_s2pte_readonly((pte_t *)pudp);
}

static inline bool kvm_s2pud_readonly(pud_t *pudp)
{
	return kvm_s2pte_readonly((pte_t *)pudp);
}

static inline bool kvm_s2pud_exec(pud_t *pudp)
{
	return !(READ_ONCE(pud_val(*pudp)) & PUD_S2_XN);
}

static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
{
	return pud_mkyoung(pud);
}

static inline bool kvm_s2pud_young(pud_t pud)
{
	return pud_young(pud);
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

#ifdef __PAGETABLE_P4D_FOLDED
#define hyp_p4d_table_empty(p4dp) (0)
#else
#define hyp_p4d_table_empty(p4dp) kvm_page_empty(p4dp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Both SCTLR_EL1.M (bit 0) and SCTLR_EL1.C (bit 2) must be set */
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	/*
	 * With FWB, the stage-2 translation forces cacheable attributes
	 * on the guest's accesses, so there is no need to clean the
	 * data cache to the PoC when faulting in a page.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 */
		void *va = page_address(pfn_to_page(pfn));

		invalidate_icache_range((unsigned long)va,
					(unsigned long)va + size);
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pte_page(pte);

		kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
	}
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pmd_page(pmd);

		kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
	}
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pud_page(pud);

		kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap_level();
}

static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
{
	return idmap_ptrs_per_pgd;
}

/*
 * Can't use pgd_populate here, because the extended idmap adds an extra
 * level above CONFIG_PGTABLE_LEVELS, so the merged pgd entries have to
 * be built by hand.
 */
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;
	u64 pgd_addr;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	pgd_addr = __phys_to_pgd_val(__pa(hyp_pgd));
	merged_hyp_pgd[0] = __pgd(pgd_addr | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot
	 * HYP map, which contains an ID mapping of the HYP init code.
	 * This essentially merges the boot and runtime HYP maps, but
	 * they don't overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	pgd_addr = __phys_to_pgd_val(__pa(boot_hyp_pgd));
	merged_hyp_pgd[idmap_idx] = __pgd(pgd_addr | PMD_TYPE_TABLE);
}

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we
 * take the SRCU read lock here. Since we copy the data from the user
 * page, we can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}
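
/*
 * Illustrative only (hypothetical caller): code running outside a vcpu
 * context, such as device emulation walking guest tables, would use
 * these wrappers instead of the raw accessors:
 *
 *	u64 entry;
 *
 *	if (kvm_read_guest_lock(kvm, gpa, &entry, sizeof(entry)))
 *		return -EFAULT;
 */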

#ifdef CONFIG_KVM_INDIRECT_VECTORS
/*
 * EL2 vectors can be mapped and rerouted in a number of ways,
 * depending on the kernel configuration and CPU present:
 *
 * - If the CPU is affected by Spectre-v2, the hardening sequence is
 *   placed in one of the vector slots, which is executed before
 *   jumping to the real vectors.
 *
 * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
 *   containing the hardening sequence is mapped next to the idmap
 *   page, and executed before jumping to the real vectors.
 *
 * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
 *   empty slot is selected, mapped next to the idmap page, and
 *   executed before jumping to the real vectors.
 *
 * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
 * VHE, as we don't have hypervisor-specific mappings. If the system
 * is VHE and yet selects this capability, it will be ignored.
 */
#include <asm/mmu.h>

extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;

/* This is called on both VHE and !VHE systems */
static inline void *kvm_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
	int slot = -1;

	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
		slot = data->hyp_vectors_slot;
	}

	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
		vect = __kvm_bp_vect_base;
		if (slot == -1)
			slot = __kvm_harden_el2_vector_slot;
	}

	/* Each slot holds a full 2kB vector table */
	if (slot != -1)
		vect += slot * SZ_2K;

	return vect;
}

static inline int kvm_map_vectors(void)
{
	/*
	 * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
	 *
	 * !HBP + !HEL2 -> use direct vectors
	 *  HBP + !HEL2 -> use hardened vectors in place
	 * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
	 *  HBP +  HEL2 -> use hardened vectors and use exec mapping
	 */
	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
	}

	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;

		/*
		 * Always allocate a spare vector slot, as we don't
		 * know yet which CPUs have a BP hardening slot that
		 * we can reuse.
		 */
		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
		return create_hyp_exec_mappings(vect_pa, size,
						&__kvm_bp_vect_base);
	}

	return 0;
}
#else
static inline void *kvm_get_hyp_vector(void)
{
	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
}

static inline int kvm_map_vectors(void)
{
	return 0;
}
#endif

#ifdef CONFIG_ARM64_SSBD
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

/*
 * Map each CPU's copy of arm64_ssbd_callback_required into HYP, so
 * that the EL2 code can tell whether the SSBD mitigation is required.
 */
static inline int hyp_map_aux_data(void)
{
	int cpu, err;

	for_each_possible_cpu(cpu) {
		u64 *ptr;

		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
		if (err)
			return err;
	}
	return 0;
}
#else
static inline int hyp_map_aux_data(void)
{
	return 0;
}
#endif

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

/*
 * Get the magic number 'x' for VTTBR:BADDR of this KVM instance.
 * With v8.2 LVA extensions, 'x' should be a minimum of 6 with
 * 52bit IPS.
 */
static inline int arm64_vttbr_x(u32 ipa_shift, u32 levels)
{
	int x = ARM64_VTTBR_X(ipa_shift, levels);

	return (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && x < 6) ? 6 : x;
}
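
/*
 * Worked example (assuming the usual ARM64_VTTBR_X(ipa, levels)
 * definition of (ipa) - ((levels) * (PAGE_SHIFT - 3))): with 4K pages,
 * a 40-bit IPA and 3 levels of stage-2 translation,
 * x = 40 - 3 * 9 = 13, so the stage-2 pgd must be 2^13-byte aligned.
 */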

static inline u64 vttbr_baddr_mask(u32 ipa_shift, u32 levels)
{
	unsigned int x = arm64_vttbr_x(ipa_shift, levels);

	return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x);
}

static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
{
	return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
}

static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
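
/*
 * For reference: the value built above packs the stage-2 pgd physical
 * address into VTTBR_EL2.BADDR, the VMID into the VMID field at bit 48,
 * and optionally sets the Common-not-Private bit (bit 0) so that TLB
 * entries can be shared between CPUs using the same translation.
 */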

/* Load the guest's stage-2 configuration (VTCR_EL2/VTTBR_EL2) on this CPU */
static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
{
	write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */