#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

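/*
 * Establish the vcpu's baseline HCR_EL2 value: the default guest flags,
 * plus E2H when the host kernel itself runs at EL2 (VHE), and with RW
 * cleared for vcpus that requested a 32-bit EL1.
 */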
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;
}

static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hcr_el2;
}

static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
{
	vcpu->arch.hcr_el2 = hcr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Register 31 is the zero register: it
 * reads as zero and writes to it are discarded.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}

/* Get the SPSR for the vcpu's current mode */
static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_spsr32(vcpu);

	return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
		return mode > COMPAT_PSR_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

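/*
 * Fault information captured on guest exit: the exception syndrome
 * (ESR_EL2), the faulting virtual address (FAR_EL2) and the faulting
 * intermediate physical address (HPFAR_EL2).
 */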
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	/* HPFAR_EL2 holds the faulting IPA page number in bits [43:4] */
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

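/* Decode the data abort ISS fields of the exception syndrome */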
static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one applies to any trap, not just Data Aborts */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT;
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

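/*
 * Guest data endianness: AArch32 guests are controlled by CPSR.E,
 * AArch64 guests by SCTLR_EL1.EE for EL1 data accesses.
 */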
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
	else
		vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);	/* SCTLR_EL1.EE */
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);

	return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));	/* SCTLR_EL1.EE */
}

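/*
 * Byte-swap data written by the guest for MMIO emulation: interpret the
 * value according to the guest's current endianness and return it in
 * host (CPU-native) byte order.
 */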
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;
}

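/*
 * The reverse of vcpu_data_guest_to_host: convert a host-native value
 * into the guest's current endianness before making it visible to the
 * guest.
 */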
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;
}

#endif /* __ARM64_KVM_EMULATE_H__ */