#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/alternative.h>
#include <asm/sysreg.h>

#define __hyp_text __section(.hyp.text) notrace

#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\
					 "mrs_s %0, " __stringify(r##vh),\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\
					 "msr_s " __stringify(r##vh) ", %x0",\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)

/*
 * Unified accessors for registers that have a different encoding
 * when VHE is in use.
 */
#define read_sysreg_el2(r)						\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\
					 "mrs %0, " __stringify(r##_EL1),\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_el2(v,r)						\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\
					 "msr " __stringify(r##_EL1) ", %x0",\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
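
/*
 * Illustrative example (not part of the original header, just a sketch of
 * how the unified accessors are meant to be used): from hyp code, accessing
 * a guest's SCTLR_EL1 goes through the _EL1/_EL12 alternative, so the same
 * source reads SCTLR_EL1 on non-VHE and SCTLR_EL12 on VHE:
 *
 *	u64 sctlr = read_sysreg_el1(sctlr);
 *	write_sysreg_el1(sctlr, sctlr);
 */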

/* The VHE specific system registers and their encoding */
#define sctlr_EL12		sys_reg(3, 5, 1, 0, 0)
#define cpacr_EL12		sys_reg(3, 5, 1, 0, 2)
#define ttbr0_EL12		sys_reg(3, 5, 2, 0, 0)
#define ttbr1_EL12		sys_reg(3, 5, 2, 0, 1)
#define tcr_EL12		sys_reg(3, 5, 2, 0, 2)
#define afsr0_EL12		sys_reg(3, 5, 5, 1, 0)
#define afsr1_EL12		sys_reg(3, 5, 5, 1, 1)
#define esr_EL12		sys_reg(3, 5, 5, 2, 0)
#define far_EL12		sys_reg(3, 5, 6, 0, 0)
#define mair_EL12		sys_reg(3, 5, 10, 2, 0)
#define amair_EL12		sys_reg(3, 5, 10, 3, 0)
#define vbar_EL12		sys_reg(3, 5, 12, 0, 0)
#define contextidr_EL12		sys_reg(3, 5, 13, 0, 1)
#define cntkctl_EL12		sys_reg(3, 5, 14, 1, 0)
#define cntp_tval_EL02		sys_reg(3, 5, 14, 2, 0)
#define cntp_ctl_EL02		sys_reg(3, 5, 14, 2, 1)
#define cntp_cval_EL02		sys_reg(3, 5, 14, 2, 2)
#define cntv_tval_EL02		sys_reg(3, 5, 14, 3, 0)
#define cntv_ctl_EL02		sys_reg(3, 5, 14, 3, 1)
#define cntv_cval_EL02		sys_reg(3, 5, 14, 3, 2)
#define spsr_EL12		sys_reg(3, 5, 4, 0, 0)
#define elr_EL12		sys_reg(3, 5, 4, 0, 1)

/**
 * hyp_alternate_select - Generates patchable code sequences that are
 * used to switch between two implementations of a function, depending
 * on the availability of a feature.
 *
 * @fname: a symbol name that will be defined as a function returning a
 * function pointer whose type will match the original and alternative
 * functions.
 * @orig: A pointer to the original function, whose address will be
 * returned if the feature is not available (the fallback).
 * @alt: A pointer to the alternative function, whose address will be
 * returned if the feature is available.
 * @cond: a CPU feature (as described in asm/cpufeature.h)
 */
#define hyp_alternate_select(fname, orig, alt, cond)			\
typeof(orig) * __hyp_text fname(void)					\
{									\
	typeof(alt) *val = orig;					\
	asm volatile(ALTERNATIVE("nop \n",				\
				 "mov %0, %1 \n",			\
				 cond)					\
		     : "+r" (val) : "r" (alt));				\
	return val;							\
}
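
/*
 * Illustrative sketch (not part of the original header, helper names are
 * hypothetical): a caller defines a selector once and then calls through
 * it. The selector returns a pointer to either the original or the
 * alternative implementation, depending on whether the CPU feature was
 * detected when the alternatives were applied.
 *
 *	static void __hyp_text __flush_nvhe(struct kvm_vcpu *vcpu) { ... }
 *	static void __hyp_text __flush_vhe(struct kvm_vcpu *vcpu) { ... }
 *
 *	static hyp_alternate_select(__flush, __flush_nvhe, __flush_vhe,
 *				    ARM64_HAS_VIRT_HOST_EXTN);
 *
 *	static void __hyp_text flush(struct kvm_vcpu *vcpu)
 *	{
 *		__flush()(vcpu);
 *	}
 */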

int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);

void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
void __sysreg32_save_state(struct kvm_vcpu *vcpu);
void __sysreg32_restore_state(struct kvm_vcpu *vcpu);

void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
bool __fpsimd_enabled(void);

void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(void);

u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
void __noreturn __hyp_do_panic(unsigned long, ...);

/*
 * Must be called from hyp code running at EL2, as it programs the
 * guest's stage-2 translation registers (VTCR_EL2/VTTBR_EL2).
 */
static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
{
	write_sysreg(kvm->arch.vtcr, vtcr_el2);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);

	/*
	 * ARM erratum 1165522 requires the actual execution of the above
	 * sysreg writes before we can switch to the EL1/EL0 translation
	 * regime used by the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
}
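
/*
 * Illustrative sketch (not part of the original header, helper name is
 * hypothetical): a hyp-mode caller installs the guest's stage-2
 * configuration and then synchronizes the context before relying on
 * guest-regime translations, e.g. ahead of guest TLB maintenance:
 *
 *	static void __hyp_text __switch_to_guest_regime(struct kvm *kvm)
 *	{
 *		__load_guest_stage2(kvm);
 *		isb();
 *	}
 */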

#endif /* __ARM64_KVM_HYP_H__ */