#ifndef __ARM_KVM_EMULATE_H__
#define __ARM_KVM_EMULATE_H__

#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_arm.h>
#include <asm/cputype.h>

/* arm64 compatibility macros */
#define PSR_AA32_MODE_ABT	ABT_MODE
#define PSR_AA32_MODE_UND	UND_MODE
#define PSR_AA32_T_BIT		PSR_T_BIT
#define PSR_AA32_I_BIT		PSR_I_BIT
#define PSR_AA32_A_BIT		PSR_A_BIT
#define PSR_AA32_E_BIT		PSR_E_BIT
#define PSR_AA32_IT_MASK	PSR_IT_MASK

unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);

static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
{
	return vcpu_reg(vcpu, reg_num);
}

unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);

static inline unsigned long vcpu_read_spsr(struct kvm_vcpu *vcpu)
{
	return *__vcpu_spsr(vcpu);
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	*__vcpu_spsr(vcpu) = v;
}

static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return *vcpu_reg(vcpu, reg_num);
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	*vcpu_reg(vcpu, reg_num) = val;
}

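/*
 * A guest on 32-bit ARM always runs in AArch32 state, so the generic
 * wrappers below simply forward to the 32-bit back-ends declared here.
 */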
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);

static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	kvm_inject_undef32(vcpu);
}

static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	kvm_inject_dabt32(vcpu, addr);
}

static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	kvm_inject_pabt32(vcpu, addr);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	return kvm_condition_valid32(vcpu);
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	kvm_skip_instr32(vcpu, is_wide_instr);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr = HCR_GUEST_MASK;
}

static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr;
}

static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr &= ~HCR_TWE;
}

static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr |= HCR_TWE;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	/* A guest on 32-bit ARM always runs in AArch32 state */
	return true;
}

static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_T_BIT;
}

static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;

	return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
}

static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;

	return cpsr_mode > USR_MODE;
}

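/*
 * The helpers below decode the fault state (HSR, HxFAR, HPFAR) that is
 * saved in vcpu->arch.fault when the guest takes a trap or abort into
 * hyp mode.
 */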
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.hsr;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);

	if (hsr & HSR_CV)
		return (hsr & HSR_COND) >> HSR_COND_SHIFT;

	return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.hxfar;
}

static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
{
	/* HPFAR[31:4] holds bits [39:12] of the faulting IPA */
	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
}

static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
}

static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
}

static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
}

static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
}

static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
}

/* Get Access Size from a data abort */
static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
{
	switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
	case 0:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	default:
		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
		return -EFAULT;
	}
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
}

static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
}

static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
}

static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
}

static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_E_BIT;
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
}

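/*
 * vcpu_data_guest_to_host - convert data written by the guest (for example
 * on an emulated MMIO store) from the guest's current endianness into the
 * host's native representation, based on the access length.
 */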
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		default:
			return be32_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		default:
			return le32_to_cpu(data);
		}
	}
}

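/*
 * vcpu_data_host_to_guest - the inverse operation: convert data produced by
 * the host (for example the result of an emulated MMIO load) into the
 * guest's current endianness before it is written back to a guest register.
 */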
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		default:
			return cpu_to_be32(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		default:
			return cpu_to_le32(data);
		}
	}
}

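/*
 * Illustrative sketch (not part of the original header, hence disabled):
 * one way the helpers above could be combined to decode an MMIO data abort.
 * The function name, the out-parameters and the fixed 4K page-offset mask
 * are assumptions made for this example only; the real decoding lives in
 * the KVM abort and MMIO handlers.
 */
#if 0
static inline int example_decode_mmio_dabt(struct kvm_vcpu *vcpu,
					   phys_addr_t *ipa,
					   unsigned long *data)
{
	int len;

	/* Without a valid ISS (ISV clear) the abort cannot be decoded here */
	if (!kvm_vcpu_dabt_isvalid(vcpu) || kvm_vcpu_dabt_iss1tw(vcpu))
		return -ENOSYS;

	len = kvm_vcpu_dabt_get_as(vcpu);	/* 1, 2 or 4 bytes */
	if (len < 0)
		return len;

	/* HPFAR only reports IPA[39:12]; the page offset comes from HxFAR */
	*ipa = kvm_vcpu_get_fault_ipa(vcpu) |
	       (kvm_vcpu_get_hfar(vcpu) & ((1UL << 12) - 1));

	if (kvm_vcpu_dabt_iswrite(vcpu)) {
		/* Guest store: fetch Rt and convert to host endianness */
		*data = vcpu_data_guest_to_host(vcpu,
				vcpu_get_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu)),
				len);
	}

	return len;
}
#endif
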
#endif	/* __ARM_KVM_EMULATE_H__ */