1
2
3
4
5
6
7
8
9
10
11#include <stdio.h>
12#include <sys/types.h>
13#include <sys/ioctl.h>
14#include <sys/mman.h>
15
16#include <linux/kvm.h>
17
18#include "config-host.h"
19#include "qemu-common.h"
20#include "qemu/timer.h"
21#include "sysemu/sysemu.h"
22#include "sysemu/kvm.h"
23#include "kvm_arm.h"
24#include "cpu.h"
25#include "internals.h"
26#include "hw/arm/arm.h"
27
/* Set a single feature bit in the 64-bit feature word @features. */
static inline void set_feature(uint64_t *features, int feature)
{
    const uint64_t bit = 1ULL << feature;

    *features |= bit;
}
32
/* Probe the host CPU by creating a scratch KVM vcpu and fill in @ahcc
 * with the accepted target type and the QEMU feature bits implied by it.
 *
 * Returns true on success; false if no scratch vcpu could be created,
 * in which case @ahcc is left untouched.
 */
bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* File descriptors for the scratch VM/vcpu, filled in and later
     * released by the create/destroy helpers below.
     */
    int fdarray[3];
    uint64_t features = 0;

    /* Candidate CPU target types offered to the kernel, tried in order;
     * the QEMU_KVM_ARM_TARGET_NONE entry presumably acts as a list
     * terminator for kvm_arm_create_scratch_host_vcpu() — confirm there.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    /* Record the target type the kernel actually accepted. */
    ahcc->target = init.target;
    ahcc->dtb_compatible = "arm,arm-v8";

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    /* Feature bits we assume for any v8-capable host CPU: all the
     * targets in cpus_to_try are ARMv8 models.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_VFP4);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);

    ahcc->features = features;

    return true;
}
79
/* Mask selecting the affinity fields of MPIDR_EL1 (used below to derive
 * cpu->mp_affinity from the host register value).
 */
#define ARM_MPIDR_HWID_BITMASK 0xFF00FFFFFFULL
/* op0, op1, crn, crm, op2 encoding of MPIDR_EL1, in the argument order
 * expected by ARM64_SYS_REG().
 */
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
82
/* Initialize the KVM vcpu backing @cs: choose the vcpu init features,
 * perform the KVM vcpu init, read back MPIDR to derive the MP affinity,
 * and build the coprocessor register list.
 *
 * Returns 0 on success or a negative errno value on failure.
 */
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);

    /* Only CPU models that resolved to a real KVM target and that are
     * AArch64-capable can run under this (64-bit) KVM backend.
     */
    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine the init feature flags to request from KVM. */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        /* CPU model is 32-bit only: ask for an EL1 AArch32 vcpu. */
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }

    /* Perform the vcpu init with the features chosen above. */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /* Read the host-assigned MPIDR and keep only the affinity fields so
     * the guest-visible MP affinity matches what KVM will report.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM_MPIDR_HWID_BITMASK;

    return kvm_arm_init_cpreg_list(cpu);
}
127
128bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
129{
130
131
132
133
134 switch (regidx & KVM_REG_ARM_COPROC_MASK) {
135 case KVM_REG_ARM_CORE:
136 return false;
137 default:
138 return true;
139 }
140}
141
/* Pairs a coprocessor register index with the minimum state-transfer
 * level at which it should be written out to KVM.
 */
typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* Registers not listed in this table default to KVM_PUT_RUNTIME_STATE
 * (see kvm_arm_cpreg_level()); a register that should be written less
 * often must be listed here with the level it requires.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};
155
156int kvm_arm_cpreg_level(uint64_t regidx)
157{
158 int i;
159
160 for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
161 const CPRegStateLevel *l = &non_runtime_cpregs[i];
162 if (l->regidx == regidx) {
163 return l->level;
164 }
165 }
166
167 return KVM_PUT_RUNTIME_STATE;
168}
169
/* Build a KVM_{GET,SET}_ONE_REG id for a 64-bit core register @x. */
#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/* Build a reg id for a 128-bit SIMD/FP core register @x. */
#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/* Build a reg id for a 32-bit SIMD/FP control register @x (FPSR/FPCR). */
#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
178
179int kvm_arch_put_registers(CPUState *cs, int level)
180{
181 struct kvm_one_reg reg;
182 uint32_t fpr;
183 uint64_t val;
184 int i;
185 int ret;
186 unsigned int el;
187
188 ARMCPU *cpu = ARM_CPU(cs);
189 CPUARMState *env = &cpu->env;
190
191
192
193
194 if (!is_a64(env)) {
195 aarch64_sync_32_to_64(env);
196 }
197
198 for (i = 0; i < 31; i++) {
199 reg.id = AARCH64_CORE_REG(regs.regs[i]);
200 reg.addr = (uintptr_t) &env->xregs[i];
201 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
202 if (ret) {
203 return ret;
204 }
205 }
206
207
208
209
210 aarch64_save_sp(env, 1);
211
212 reg.id = AARCH64_CORE_REG(regs.sp);
213 reg.addr = (uintptr_t) &env->sp_el[0];
214 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
215 if (ret) {
216 return ret;
217 }
218
219 reg.id = AARCH64_CORE_REG(sp_el1);
220 reg.addr = (uintptr_t) &env->sp_el[1];
221 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
222 if (ret) {
223 return ret;
224 }
225
226
227 if (is_a64(env)) {
228 val = pstate_read(env);
229 } else {
230 val = cpsr_read(env);
231 }
232 reg.id = AARCH64_CORE_REG(regs.pstate);
233 reg.addr = (uintptr_t) &val;
234 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
235 if (ret) {
236 return ret;
237 }
238
239 reg.id = AARCH64_CORE_REG(regs.pc);
240 reg.addr = (uintptr_t) &env->pc;
241 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
242 if (ret) {
243 return ret;
244 }
245
246 reg.id = AARCH64_CORE_REG(elr_el1);
247 reg.addr = (uintptr_t) &env->elr_el[1];
248 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
249 if (ret) {
250 return ret;
251 }
252
253
254
255
256
257
258
259 el = arm_current_el(env);
260 if (el > 0 && !is_a64(env)) {
261 i = bank_number(env->uncached_cpsr & CPSR_M);
262 env->banked_spsr[i] = env->spsr;
263 }
264
265
266 for (i = 0; i < KVM_NR_SPSR; i++) {
267 reg.id = AARCH64_CORE_REG(spsr[i]);
268 reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
269 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
270 if (ret) {
271 return ret;
272 }
273 }
274
275
276
277
278 for (i = 0; i < 32; i++) {
279 int rd = i << 1;
280 uint64_t fp_val[2];
281#ifdef HOST_WORDS_BIGENDIAN
282 fp_val[0] = env->vfp.regs[rd + 1];
283 fp_val[1] = env->vfp.regs[rd];
284#else
285 fp_val[1] = env->vfp.regs[rd + 1];
286 fp_val[0] = env->vfp.regs[rd];
287#endif
288 reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
289 reg.addr = (uintptr_t)(&fp_val);
290 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
291 if (ret) {
292 return ret;
293 }
294 }
295
296 reg.addr = (uintptr_t)(&fpr);
297 fpr = vfp_get_fpsr(env);
298 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
299 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
300 if (ret) {
301 return ret;
302 }
303
304 fpr = vfp_get_fpcr(env);
305 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
306 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®);
307 if (ret) {
308 return ret;
309 }
310
311 if (!write_list_to_kvmstate(cpu, level)) {
312 return EINVAL;
313 }
314
315 kvm_arm_sync_mpstate_to_kvm(cpu);
316
317 return ret;
318}
319
320int kvm_arch_get_registers(CPUState *cs)
321{
322 struct kvm_one_reg reg;
323 uint64_t val;
324 uint32_t fpr;
325 unsigned int el;
326 int i;
327 int ret;
328
329 ARMCPU *cpu = ARM_CPU(cs);
330 CPUARMState *env = &cpu->env;
331
332 for (i = 0; i < 31; i++) {
333 reg.id = AARCH64_CORE_REG(regs.regs[i]);
334 reg.addr = (uintptr_t) &env->xregs[i];
335 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
336 if (ret) {
337 return ret;
338 }
339 }
340
341 reg.id = AARCH64_CORE_REG(regs.sp);
342 reg.addr = (uintptr_t) &env->sp_el[0];
343 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
344 if (ret) {
345 return ret;
346 }
347
348 reg.id = AARCH64_CORE_REG(sp_el1);
349 reg.addr = (uintptr_t) &env->sp_el[1];
350 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
351 if (ret) {
352 return ret;
353 }
354
355 reg.id = AARCH64_CORE_REG(regs.pstate);
356 reg.addr = (uintptr_t) &val;
357 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
358 if (ret) {
359 return ret;
360 }
361
362 env->aarch64 = ((val & PSTATE_nRW) == 0);
363 if (is_a64(env)) {
364 pstate_write(env, val);
365 } else {
366 env->uncached_cpsr = val & CPSR_M;
367 cpsr_write(env, val, 0xffffffff);
368 }
369
370
371
372
373 aarch64_restore_sp(env, 1);
374
375 reg.id = AARCH64_CORE_REG(regs.pc);
376 reg.addr = (uintptr_t) &env->pc;
377 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
378 if (ret) {
379 return ret;
380 }
381
382
383
384
385
386
387 if (!is_a64(env)) {
388 aarch64_sync_64_to_32(env);
389 }
390
391 reg.id = AARCH64_CORE_REG(elr_el1);
392 reg.addr = (uintptr_t) &env->elr_el[1];
393 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
394 if (ret) {
395 return ret;
396 }
397
398
399
400
401
402 for (i = 0; i < KVM_NR_SPSR; i++) {
403 reg.id = AARCH64_CORE_REG(spsr[i]);
404 reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
405 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
406 if (ret) {
407 return ret;
408 }
409 }
410
411 el = arm_current_el(env);
412 if (el > 0 && !is_a64(env)) {
413 i = bank_number(env->uncached_cpsr & CPSR_M);
414 env->spsr = env->banked_spsr[i];
415 }
416
417
418
419
420 for (i = 0; i < 32; i++) {
421 uint64_t fp_val[2];
422 reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
423 reg.addr = (uintptr_t)(&fp_val);
424 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
425 if (ret) {
426 return ret;
427 } else {
428 int rd = i << 1;
429#ifdef HOST_WORDS_BIGENDIAN
430 env->vfp.regs[rd + 1] = fp_val[0];
431 env->vfp.regs[rd] = fp_val[1];
432#else
433 env->vfp.regs[rd + 1] = fp_val[1];
434 env->vfp.regs[rd] = fp_val[0];
435#endif
436 }
437 }
438
439 reg.addr = (uintptr_t)(&fpr);
440 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
441 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
442 if (ret) {
443 return ret;
444 }
445 vfp_set_fpsr(env, fpr);
446
447 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
448 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®);
449 if (ret) {
450 return ret;
451 }
452 vfp_set_fpcr(env, fpr);
453
454 if (!write_kvmstate_to_list(cpu)) {
455 return EINVAL;
456 }
457
458
459
460 write_list_to_cpustate(cpu);
461
462 kvm_arm_sync_mpstate_to_qemu(cpu);
463
464
465 return ret;
466}
467