#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/cpufeature.h>

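/*
 * ARMv8.0 nVHE hyp code only has TTBR0_EL2, so it cannot use "negative"
 * (kernel-range) addresses and cannot share the kernel's mappings
 * directly. Instead, HYP gets its own VA range: kernel VAs are converted
 * by masking the top bits of the address and inserting a per-boot tag.
 * The mask and tag are computed at boot time (kvm_compute_layout()) and
 * patched into the kern_hyp_va sequences below by kvm_update_va_mask().
 */
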
#ifdef __ASSEMBLY__

#include <asm/alternative.h>

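/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask(); the
 * instructions below only reserve the space and fix the register
 * allocation (kvm_update_va_mask() rewrites the registers encoded in
 * these instructions).
 */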
.macro kern_hyp_va	reg
alternative_cb kvm_update_va_mask
	and	\reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm

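/*
 * Convert a hypervisor VA to a PA
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 */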
.macro hyp_pa	reg, tmp
	ldr_l	\tmp, hyp_physvirt_offset
	add	\reg, \reg, \tmp
.endm

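/*
 * Convert a hypervisor VA to a kernel image address
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset(); the
 * instructions below only reserve the space and fix the register
 * allocation (kvm_get_kimage_voffset() rewrites the registers encoded in
 * these instructions).
 */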
.macro hyp_kimg_va	reg, tmp
	/* Convert hyp VA -> PA. */
	hyp_pa	\reg, \tmp

	/* Load kimage_voffset. */
alternative_cb kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* Convert PA -> kimg VA. */
	add	\reg, \reg, \tmp
.endm

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);

#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)

/*
 * C counterpart of the kern_hyp_va assembly macro above; the placeholder
 * instructions are patched by kvm_update_va_mask() in the same way.
 */
static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))

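/*
 * Illustrative use only (a sketch, not code from this header): any kernel
 * pointer handed to the nVHE hypervisor must be converted first, e.g.
 *
 *	struct kvm_vcpu *hyp_vcpu = kern_hyp_va(vcpu);
 */
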
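/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40bits.
 */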
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))

#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(u32 *hyp_va_bits);

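/*
 * Each vector slot is 2kB. HYP_VECTOR_DIRECT keeps index 0 and every other
 * slot is shifted down by one, so the two "direct" variants share the first
 * slot (this description assumes the arm64_hyp_spectre_vector enum order).
 */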
static inline void *__kvm_vector_slot2addr(void *base,
					   enum arm64_hyp_spectre_vector slot)
{
	int idx = slot - (slot != HYP_VECTOR_DIRECT);

	return base + (idx * SZ_2K);
}

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	\
	dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* 0b101 checks SCTLR_EL1.M (bit 0) and SCTLR_EL1.C (bit 2) together. */
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(void *va, size_t size)
{
	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, so we don't have to clean to the PoC when
	 * faulting in pages.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
	if (icache_is_aliasing()) {
		/* Any kind of VIPT cache */
		icache_inval_all_pou();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 */
		icache_inval_pou((unsigned long)va, (unsigned long)va + size);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

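/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */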
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

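/*
 * When this is (directly or indirectly) used on the TLB invalidation
 * path, we rely on a previously issued DSB so that page table updates
 * and VMID reads are correctly ordered.
 */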
static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = (u64)READ_ONCE(vmid->vmid) << VTTBR_VMID_SHIFT;
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}

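/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */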
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
					  struct kvm_arch *arch)
{
	write_sysreg(arch->vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used
	 * by the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
{
	return container_of(mmu->arch, struct kvm, arch);
}
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */