#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

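/*
 * Reset HCR_EL2 to its default guest configuration: keep HCR_E2H set when
 * the host kernel itself runs at EL2 (VHE), and clear HCR_RW for VCPUs
 * created with the KVM_ARM_VCPU_EL1_32BIT feature so their EL1 is AArch32.
 */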
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;
}

static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hcr_el2;
}

static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
{
	vcpu->arch.hcr_el2 = hcr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

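/*
 * A trapped AArch32 instruction may be conditional; check its condition
 * code before emulating it. Traps taken from AArch64 are always treated
 * as unconditional.
 */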
static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

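/*
 * Skip the instruction that triggered the trap: 2 or 4 bytes for AArch32
 * (narrow vs. wide), always 4 bytes for AArch64.
 */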
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}

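/*
 * vcpu_get_reg and vcpu_set_reg treat register 31 as the zero register
 * (XZR): reads return 0 and writes are discarded. The register number is
 * expected to come from the ESR_EL2 ISS encoding of the trapped access.
 */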
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}

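/*
 * Return a pointer to the banked SPSR of the VCPU's current mode
 * (SPSR_EL1 when the guest is running in AArch64 state).
 */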
static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_spsr32(vcpu);

	return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
		return mode > COMPAT_PSR_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

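/*
 * Fault syndrome accessors: ESR_EL2, FAR_EL2 and HPFAR_EL2 are snapshotted
 * into vcpu->arch.fault on exit and decoded by the helpers below.
 */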
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

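/*
 * HPFAR_EL2 holds bits [47:12] of the faulting IPA in its FIPA field,
 * starting at bit 4; shifting the masked value left by 8 reconstructs the
 * page-aligned IPA.
 */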
static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

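/*
 * Data abort ISS decoding: syndrome validity, sign extension, transfer
 * register, access size, and whether the abort was a write, an external
 * abort, a stage 1 page table walk or a cache maintenance operation.
 */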
static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu);
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

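/*
 * Generic trap decoding: instruction length, exception class (EC) and
 * fault status code (FSC) from ESR_EL2.
 */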
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
}

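/* Affinity fields of the guest's MPIDR_EL1 (the VCPU's hardware ID). */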
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

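/*
 * Guest data endianness: CPSR.E when the VCPU is in an AArch32 mode,
 * SCTLR_EL1.EE (bit 25) otherwise.
 */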
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
	else
		vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);	/* SCTLR_EL1.EE */
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);

	return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));	/* SCTLR_EL1.EE */
}

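/*
 * Convert MMIO data between guest byte order and host byte order, truncated
 * to the access width: guest_to_host is used for data the guest has stored,
 * host_to_guest for data about to be loaded into a guest register.
 */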
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;
}

#endif /* __ARM64_KVM_EMULATE_H__ */