1
2
3
4
5
6
7
8
9
10
11#include <stdio.h>
12#include <sys/types.h>
13#include <sys/ioctl.h>
14#include <sys/mman.h>
15
16#include <linux/kvm.h>
17
18#include "config-host.h"
19#include "qemu-common.h"
20#include "qemu/timer.h"
21#include "sysemu/sysemu.h"
22#include "sysemu/kvm.h"
23#include "kvm_arm.h"
24#include "cpu.h"
25#include "internals.h"
26#include "hw/arm/arm.h"
27
/* Set a single feature bit in the 64-bit features word. */
static inline void set_feature(uint64_t *features, int feature)
{
    const uint64_t bit = 1ULL << feature;

    *features |= bit;
}
32
33bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
34{
35
36
37
38
39
40
41
42 int fdarray[3];
43 uint64_t features = 0;
44
45
46
47
48
49 static const uint32_t cpus_to_try[] = {
50 KVM_ARM_TARGET_AEM_V8,
51 KVM_ARM_TARGET_FOUNDATION_V8,
52 KVM_ARM_TARGET_CORTEX_A57,
53 QEMU_KVM_ARM_TARGET_NONE
54 };
55 struct kvm_vcpu_init init;
56
57 if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
58 return false;
59 }
60
61 ahcc->target = init.target;
62 ahcc->dtb_compatible = "arm,arm-v8";
63
64 kvm_arm_destroy_scratch_host_vcpu(fdarray);
65
66
67
68
69
70 set_feature(&features, ARM_FEATURE_V8);
71 set_feature(&features, ARM_FEATURE_VFP4);
72 set_feature(&features, ARM_FEATURE_NEON);
73 set_feature(&features, ARM_FEATURE_AARCH64);
74
75 ahcc->features = features;
76
77 return true;
78}
79
/*
 * Initialize a vcpu for KVM: select the init features, issue
 * KVM_ARM_VCPU_INIT via kvm_arm_vcpu_init() and build the cpreg list.
 * Returns 0 on success or a negative errno.
 */
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    ARMCPU *cpu = ARM_CPU(cs);

    /* This backend only handles 64-bit-capable CPU models that have a
     * valid KVM target type.
     */
    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        /* Prefer PSCI 0.2 when the kernel supports it */
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        /* 32-bit guest on a 64-bit host: start EL1 in AArch32 */
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    return kvm_arm_init_cpreg_list(cpu);
}
112
113bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
114{
115
116
117
118
119 switch (regidx & KVM_REG_ARM_COPROC_MASK) {
120 case KVM_REG_ARM_CORE:
121 return false;
122 default:
123 return true;
124 }
125}
126
/* Build KVM_GET/SET_ONE_REG ids for AArch64 core registers of each
 * access size; x is the field within the kernel's struct kvm_regs.
 */
#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/* 128-bit id for the FP/SIMD vector registers */
#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/* 32-bit id for the FP control/status words (FPSR/FPCR) */
#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
135
136int kvm_arch_put_registers(CPUState *cs, int level)
137{
138 struct kvm_one_reg reg;
139 uint32_t fpr;
140 uint64_t val;
141 int i;
142 int ret;
143 unsigned int el;
144
145 ARMCPU *cpu = ARM_CPU(cs);
146 CPUARMState *env = &cpu->env;
147
148
149
150
151 if (!is_a64(env)) {
152 aarch64_sync_32_to_64(env);
153 }
154
155 for (i = 0; i < 31; i++) {
156 reg.id = AARCH64_CORE_REG(regs.regs[i]);
157 reg.addr = (uintptr_t) &env->xregs[i];
158 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
159 if (ret) {
160 return ret;
161 }
162 }
163
164
165
166
167 aarch64_save_sp(env, 1);
168
169 reg.id = AARCH64_CORE_REG(regs.sp);
170 reg.addr = (uintptr_t) &env->sp_el[0];
171 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
172 if (ret) {
173 return ret;
174 }
175
176 reg.id = AARCH64_CORE_REG(sp_el1);
177 reg.addr = (uintptr_t) &env->sp_el[1];
178 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
179 if (ret) {
180 return ret;
181 }
182
183
184 if (is_a64(env)) {
185 val = pstate_read(env);
186 } else {
187 val = cpsr_read(env);
188 }
189 reg.id = AARCH64_CORE_REG(regs.pstate);
190 reg.addr = (uintptr_t) &val;
191 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
192 if (ret) {
193 return ret;
194 }
195
196 reg.id = AARCH64_CORE_REG(regs.pc);
197 reg.addr = (uintptr_t) &env->pc;
198 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
199 if (ret) {
200 return ret;
201 }
202
203 reg.id = AARCH64_CORE_REG(elr_el1);
204 reg.addr = (uintptr_t) &env->elr_el[1];
205 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
206 if (ret) {
207 return ret;
208 }
209
210
211
212
213
214
215
216 el = arm_current_el(env);
217 if (el > 0 && !is_a64(env)) {
218 i = bank_number(env->uncached_cpsr & CPSR_M);
219 env->banked_spsr[i] = env->spsr;
220 }
221
222
223 for (i = 0; i < KVM_NR_SPSR; i++) {
224 reg.id = AARCH64_CORE_REG(spsr[i]);
225 reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
226 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
227 if (ret) {
228 return ret;
229 }
230 }
231
232
233
234
235 for (i = 0; i < 32; i++) {
236 int rd = i << 1;
237 uint64_t fp_val[2];
238#ifdef HOST_WORDS_BIGENDIAN
239 fp_val[0] = env->vfp.regs[rd + 1];
240 fp_val[1] = env->vfp.regs[rd];
241#else
242 fp_val[1] = env->vfp.regs[rd + 1];
243 fp_val[0] = env->vfp.regs[rd];
244#endif
245 reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
246 reg.addr = (uintptr_t)(&fp_val);
247 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
248 if (ret) {
249 return ret;
250 }
251 }
252
253 reg.addr = (uintptr_t)(&fpr);
254 fpr = vfp_get_fpsr(env);
255 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
256 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
257 if (ret) {
258 return ret;
259 }
260
261 fpr = vfp_get_fpcr(env);
262 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
263 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
264 if (ret) {
265 return ret;
266 }
267
268 if (!write_list_to_kvmstate(cpu)) {
269 return EINVAL;
270 }
271
272 kvm_arm_sync_mpstate_to_kvm(cpu);
273
274 return ret;
275}
276
277int kvm_arch_get_registers(CPUState *cs)
278{
279 struct kvm_one_reg reg;
280 uint64_t val;
281 uint32_t fpr;
282 unsigned int el;
283 int i;
284 int ret;
285
286 ARMCPU *cpu = ARM_CPU(cs);
287 CPUARMState *env = &cpu->env;
288
289 for (i = 0; i < 31; i++) {
290 reg.id = AARCH64_CORE_REG(regs.regs[i]);
291 reg.addr = (uintptr_t) &env->xregs[i];
292 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
293 if (ret) {
294 return ret;
295 }
296 }
297
298 reg.id = AARCH64_CORE_REG(regs.sp);
299 reg.addr = (uintptr_t) &env->sp_el[0];
300 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
301 if (ret) {
302 return ret;
303 }
304
305 reg.id = AARCH64_CORE_REG(sp_el1);
306 reg.addr = (uintptr_t) &env->sp_el[1];
307 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
308 if (ret) {
309 return ret;
310 }
311
312 reg.id = AARCH64_CORE_REG(regs.pstate);
313 reg.addr = (uintptr_t) &val;
314 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
315 if (ret) {
316 return ret;
317 }
318
319 env->aarch64 = ((val & PSTATE_nRW) == 0);
320 if (is_a64(env)) {
321 pstate_write(env, val);
322 } else {
323 env->uncached_cpsr = val & CPSR_M;
324 cpsr_write(env, val, 0xffffffff);
325 }
326
327
328
329
330 aarch64_restore_sp(env, 1);
331
332 reg.id = AARCH64_CORE_REG(regs.pc);
333 reg.addr = (uintptr_t) &env->pc;
334 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
335 if (ret) {
336 return ret;
337 }
338
339
340
341
342
343
344 if (!is_a64(env)) {
345 aarch64_sync_64_to_32(env);
346 }
347
348 reg.id = AARCH64_CORE_REG(elr_el1);
349 reg.addr = (uintptr_t) &env->elr_el[1];
350 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
351 if (ret) {
352 return ret;
353 }
354
355
356
357
358
359 for (i = 0; i < KVM_NR_SPSR; i++) {
360 reg.id = AARCH64_CORE_REG(spsr[i]);
361 reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
362 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
363 if (ret) {
364 return ret;
365 }
366 }
367
368 el = arm_current_el(env);
369 if (el > 0 && !is_a64(env)) {
370 i = bank_number(env->uncached_cpsr & CPSR_M);
371 env->spsr = env->banked_spsr[i];
372 }
373
374
375
376
377 for (i = 0; i < 32; i++) {
378 uint64_t fp_val[2];
379 reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
380 reg.addr = (uintptr_t)(&fp_val);
381 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
382 if (ret) {
383 return ret;
384 } else {
385 int rd = i << 1;
386#ifdef HOST_WORDS_BIGENDIAN
387 env->vfp.regs[rd + 1] = fp_val[0];
388 env->vfp.regs[rd] = fp_val[1];
389#else
390 env->vfp.regs[rd + 1] = fp_val[1];
391 env->vfp.regs[rd] = fp_val[0];
392#endif
393 }
394 }
395
396 reg.addr = (uintptr_t)(&fpr);
397 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
398 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
399 if (ret) {
400 return ret;
401 }
402 vfp_set_fpsr(env, fpr);
403
404 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
405 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
406 if (ret) {
407 return ret;
408 }
409 vfp_set_fpcr(env, fpr);
410
411 if (!write_kvmstate_to_list(cpu)) {
412 return EINVAL;
413 }
414
415
416
417 write_list_to_cpustate(cpu);
418
419 kvm_arm_sync_mpstate_to_qemu(cpu);
420
421
422 return ret;
423}
424