#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ	  0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP	  2
#define ARM_EXCEPTION_IL	  3

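/* The hyp-stub will return this for any kvm_call_hyp() call */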
#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}

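/*
 * Size of the preamble at the start of each hyp vector slot; branches
 * patched into the vectors must jump over it.
 */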
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define __SMCCC_WORKAROUND_1_SMC_SZ 36

#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

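/*
 * Function IDs the host kernel uses to call into the nVHE hypervisor;
 * wrap them with KVM_HOST_SMCCC_FUNC()/KVM_HOST_SMCCC_ID() to form the
 * full SMCCC function ID.
 */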
#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0
#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run			1
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context		2
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa		3
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid		4
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid	5
#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff		6
#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs			7
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2		8
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr		9
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr		10
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs		11
#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2		12
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs		13
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs		14

#ifndef __ASSEMBLY__

#include <linux/mm.h>

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

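/*
 * Declare a pair of symbols sharing the same name but one defined in
 * the VHE and the other in the nVHE hyp implementation.
 */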
#define DECLARE_KVM_HYP_SYM(sym)		\
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)

#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
	DECLARE_KVM_NVHE_PER_CPU(type, sym)

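/*
 * Compute a pointer to a symbol defined in the nVHE percpu region.
 * Returns NULL if the percpu memory has not been allocated yet.
 */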
#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
	({									\
		unsigned long base, off;					\
		base = kvm_arm_hyp_percpu_base[cpu];				\
		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);		\
		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
	})

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)

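/* The nVHE hypervisor shouldn't even try to access VHE symbols */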
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)

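/* The VHE hypervisor shouldn't even try to access nVHE symbols */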
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)

#else

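/*
 * The host kernel picks the VHE or nVHE copy of a symbol at runtime.
 * is_kernel_in_hyp_mode() (rather than has_vhe()) is used here because
 * these helpers can run early at boot, before capabilities are final.
 */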
#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
					   ? CHOOSE_VHE_SYM(sym)	\
					   : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
					   ? this_cpu_ptr(&sym)		\
					   : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
					   ? per_cpu_ptr(&sym, cpu)	\
					   : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

#endif

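/*
 * Translate a kernel image address to its linear-map alias, except when
 * the kernel itself runs at EL2 (VHE), where the image address can be
 * used directly.
 */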
#define kvm_ksym_ref(ptr)						\
	({								\
		void *val = (ptr);					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias((ptr));				\
		val;							\
	 })
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_NVHE_SYM(__kvm_hyp_host_vector);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_host_vector	CHOOSE_NVHE_SYM(__kvm_hyp_host_vector)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

extern atomic_t arm64_el2_vector_last_slot;
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_enable_ssbs(void);

extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);

extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];

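/*
 * Obtain the PC-relative address of a kernel symbol.
 *
 * The address is computed with adrp/add rather than loaded from a
 * constant pool, so the result is correct regardless of the mapping the
 * code runs under (useful at HYP, where an absolute kernel VA would be
 * wrong). Only use this on actual symbols, not on pointers obtained by
 * other means.
 */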
#define hyp_symbol_addr(s)						\
	({								\
		typeof(s) *addr;					\
		asm("adrp	%0, %1\n"				\
		    "add	%0, %0, :lo12:%1\n"			\
		    : "=r" (addr) : "S" (&s));				\
		addr;							\
	})

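/*
 * KVM extable entry for use in inline assembly: records a (faulting
 * instruction, fixup) pair as relative offsets in the __kvm_ex_table
 * section.
 */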
#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

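/*
 * Issue an AT (address translation) instruction that may fault at EL2.
 * SPSR_EL2/ELR_EL2 are saved up front and restored by the extable fixup,
 * and -EFAULT is returned if the AT raises an exception.
 */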
#define __kvm_at(at_op, addr)						\
( {									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	at	"at_op", %3\n"					\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )

#else /* __ASSEMBLY__ */

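/*
 * Assembly helpers for locating the host CPU context and the currently
 * loaded vCPU via the per-CPU kvm_host_data and kvm_hyp_ctxt variables.
 */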
.macro get_host_ctxt reg, tmp
	adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

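/*
 * KVM extable for unexpected exceptions: same layout as _asm_extable,
 * but emitted into the __kvm_ex_table section so it can be mapped at
 * EL2. The table is not sorted; the fixup code must restore any state
 * (e.g. SPSR_EL2/ELR_EL2) it relies on.
 */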
.macro _kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm

#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)

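/*
 * x18 is saved and restored along with the callee-saved registers, as
 * the host may use it as a platform register (e.g. for the shadow call
 * stack).
 */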
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
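	// \ctxt must not be one of x18-x28, as those are clobbered below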
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro save_sp_el0 ctxt, tmp
	mrs	\tmp, sp_el0
	str	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

#endif

#endif /* __ARM_KVM_ASM_H__ */