#include <linux/arm-smccc.h>
#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>

#include <kvm/arm_psci.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * (PSCI) as described in the Arm PSCI specification (ARM DEN 0022).
 */

#define AFFINITY_MASK(level)	~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

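/*
 * SMCCC convention as used here: the guest places the function ID in
 * r0/x0 and up to three arguments in r1-r3/x1-x3; results are returned
 * in r0-r3/x0-x3.  The helpers below read and write those vcpu registers.
 */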
static u32 smccc_get_function(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 0);
}

static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 1);
}

static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 2);
}

static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 3);
}

static void smccc_set_retval(struct kvm_vcpu *vcpu,
			     unsigned long a0,
			     unsigned long a1,
			     unsigned long a2,
			     unsigned long a3)
{
	vcpu_set_reg(vcpu, 0, a0);
	vcpu_set_reg(vcpu, 1, a1);
	vcpu_set_reg(vcpu, 2, a2);
	vcpu_set_reg(vcpu, 3, a3);
}

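/*
 * Build the MPIDR mask covering the given affinity level and above.
 * Levels above 3 are invalid and yield an empty mask.
 */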
static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
	if (affinity_level <= 3)
		return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

	return 0;
}

static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	/*
	 * NOTE: For simplicity, CPU_SUSPEND is emulated in the same way as
	 * WFI (wait-for-interrupt): the vcpu blocks until a wakeup event
	 * (an interrupt) is pending.  Power-down requests are treated like
	 * standby requests, so all suspend states preserve the vcpu's
	 * register state.
	 */
	kvm_vcpu_block(vcpu);
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);

	return PSCI_RET_SUCCESS;
}

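/*
 * Power off the given vcpu: mark it as powered off and make it exit the
 * guest and go to sleep (KVM_REQ_SLEEP).
 */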
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

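/*
 * Handle CPU_ON: resolve the target MPIDR to a vcpu, record the requested
 * entry point and context ID in its reset state, and wake it up.  Called
 * with kvm->lock held to serialise concurrent CPU_ON requests.
 */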
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct vcpu_reset_state *reset_state;
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL;
	unsigned long cpu_id;

	cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
	/* For an AArch32 caller, only the lower 32 bits of the MPIDR are valid. */
	if (vcpu_mode_is_32bit(source_vcpu))
		cpu_id &= ~((u32) 0);

	vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

	/*
	 * Make sure the caller requested a valid vcpu and that it is
	 * currently turned off.
	 */
	if (!vcpu)
		return PSCI_RET_INVALID_PARAMS;
	if (!vcpu->arch.power_off) {
		if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
			return PSCI_RET_ALREADY_ON;
		else
			return PSCI_RET_INVALID_PARAMS;
	}

	reset_state = &vcpu->arch.reset_state;

	reset_state->pc = smccc_get_arg2(source_vcpu);

	/* Propagate the caller's endianness to the target vcpu. */
	reset_state->be = kvm_vcpu_is_be(source_vcpu);

	/*
	 * The context ID passed by the caller is handed to the target vcpu
	 * in r0/x0 when it starts executing at the requested entry point.
	 */
	reset_state->r0 = smccc_get_arg3(source_vcpu);

	WRITE_ONCE(reset_state->reset, true);
	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);

	/*
	 * Make sure the reset request is observed if the change to
	 * power_off is observed.
	 */
	smp_wmb();

	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);

	return PSCI_RET_SUCCESS;
}

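/*
 * Handle AFFINITY_INFO: report whether any vcpu whose MPIDR matches the
 * requested affinity (at the given affinity level) is currently on.
 */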
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
	int i, matching_cpus = 0;
	unsigned long mpidr;
	unsigned long target_affinity;
	unsigned long target_affinity_mask;
	unsigned long lowest_affinity_level;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	target_affinity = smccc_get_arg1(vcpu);
	lowest_affinity_level = smccc_get_arg2(vcpu);

	/* Determine the target affinity mask. */
	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
	if (!target_affinity_mask)
		return PSCI_RET_INVALID_PARAMS;

	/* Ignore the other bits of the target affinity. */
	target_affinity &= target_affinity_mask;

	/*
	 * If one or more vcpus matching the target affinity are running,
	 * report ON, otherwise report OFF.
	 */
	kvm_for_each_vcpu(i, tmp, kvm) {
		mpidr = kvm_vcpu_get_mpidr_aff(tmp);
		if ((mpidr & target_affinity_mask) == target_affinity) {
			matching_cpus++;
			if (!tmp->arch.power_off)
				return PSCI_0_2_AFFINITY_LEVEL_ON;
		}
	}

	if (!matching_cpus)
		return PSCI_RET_INVALID_PARAMS;

	return PSCI_0_2_AFFINITY_LEVEL_OFF;
}

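/*
 * Prepare a KVM_EXIT_SYSTEM_EVENT exit (shutdown or reset) to be delivered
 * to userspace, which is responsible for actually performing the event.
 */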
static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
	int i;
	struct kvm_vcpu *tmp;

	/*
	 * Power off all vcpus and request that they go to sleep before the
	 * system event is reported, so that no vcpu keeps running in the
	 * guest while userspace handles the shutdown or reset.
	 */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
	vcpu->run->system_event.type = type;
	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}

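/*
 * Dispatch a PSCI v0.2 function call.  Returns 1 to resume the guest or 0
 * to exit to userspace (for SYSTEM_OFF and SYSTEM_RESET).
 */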
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;
	int ret = 1;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		/*
		 * PSCI_VERSION: major version in bits [31:16], minor version
		 * in bits [15:0].
		 */
		val = KVM_ARM_PSCI_0_2;
		break;
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_SUSPEND:
		val = kvm_psci_vcpu_suspend(vcpu);
		break;
	case PSCI_0_2_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case PSCI_0_2_FN_CPU_ON:
	case PSCI_0_2_FN64_CPU_ON:
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	case PSCI_0_2_FN_AFFINITY_INFO:
	case PSCI_0_2_FN64_AFFINITY_INFO:
		val = kvm_psci_vcpu_affinity_info(vcpu);
		break;
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		/*
		 * Report a multiprocessor (or absent) Trusted OS, which does
		 * not require migration.
		 */
		val = PSCI_0_2_TOS_MP;
		break;
	case PSCI_0_2_FN_SYSTEM_OFF:
		kvm_psci_system_off(vcpu);
		/*
		 * We shouldn't be going back to the guest after a SYSTEM_OFF
		 * request.  If userspace accidentally or deliberately resumes
		 * the vcpu anyway, the guest should see an internal failure,
		 * so preload r0 (or x0) with PSCI_RET_INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	case PSCI_0_2_FN_SYSTEM_RESET:
		kvm_psci_system_reset(vcpu);
		/*
		 * Preload r0 (or x0) with PSCI_RET_INTERNAL_FAILURE for the
		 * same reason as SYSTEM_OFF.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
}

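/*
 * Dispatch a PSCI v1.0 function call.  PSCI_VERSION and PSCI_FEATURES are
 * handled here; everything else falls back to kvm_psci_0_2_call().
 */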
static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
{
	u32 psci_fn = smccc_get_function(vcpu);
	u32 feature;
	unsigned long val;
	int ret = 1;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		val = KVM_ARM_PSCI_1_0;
		break;
	case PSCI_1_0_FN_PSCI_FEATURES:
		feature = smccc_get_arg1(vcpu);
		switch (feature) {
		case PSCI_0_2_FN_PSCI_VERSION:
		case PSCI_0_2_FN_CPU_SUSPEND:
		case PSCI_0_2_FN64_CPU_SUSPEND:
		case PSCI_0_2_FN_CPU_OFF:
		case PSCI_0_2_FN_CPU_ON:
		case PSCI_0_2_FN64_CPU_ON:
		case PSCI_0_2_FN_AFFINITY_INFO:
		case PSCI_0_2_FN64_AFFINITY_INFO:
		case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		case PSCI_0_2_FN_SYSTEM_OFF:
		case PSCI_0_2_FN_SYSTEM_RESET:
		case PSCI_1_0_FN_PSCI_FEATURES:
		case ARM_SMCCC_VERSION_FUNC_ID:
			val = 0;
			break;
		default:
			val = PSCI_RET_NOT_SUPPORTED;
			break;
		}
		break;
	default:
		return kvm_psci_0_2_call(vcpu);
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
}

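/*
 * Dispatch a call using the legacy, KVM-specific PSCI v0.1 function IDs.
 * Only CPU_OFF and CPU_ON are implemented; everything else is reported as
 * not supported.
 */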
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;

	switch (psci_fn) {
	case KVM_PSCI_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case KVM_PSCI_FN_CPU_ON:
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return 1;
}

/**
 * kvm_psci_call - handle a guest PSCI call
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 * -EINVAL: Unrecognized PSCI version
 */
static int kvm_psci_call(struct kvm_vcpu *vcpu)
{
	switch (kvm_psci_version(vcpu, vcpu->kvm)) {
	case KVM_ARM_PSCI_1_0:
		return kvm_psci_1_0_call(vcpu);
	case KVM_ARM_PSCI_0_2:
		return kvm_psci_0_2_call(vcpu);
	case KVM_ARM_PSCI_0_1:
		return kvm_psci_0_1_call(vcpu);
	default:
		return -EINVAL;
	}
}

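/*
 * Entry point for guest hypercalls.  SMCCC_VERSION and SMCCC_ARCH_FEATURES
 * queries (including discovery of the ARCH_WORKAROUND_1/2 mitigations) are
 * answered here; everything else is passed on to the PSCI handler.
 */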
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
	u32 func_id = smccc_get_function(vcpu);
	u32 val = SMCCC_RET_NOT_SUPPORTED;
	u32 feature;

	switch (func_id) {
	case ARM_SMCCC_VERSION_FUNC_ID:
		val = ARM_SMCCC_VERSION_1_1;
		break;
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		feature = smccc_get_arg1(vcpu);
		switch (feature) {
		case ARM_SMCCC_ARCH_WORKAROUND_1:
			switch (kvm_arm_harden_branch_predictor()) {
			case KVM_BP_HARDEN_UNKNOWN:
				break;
			case KVM_BP_HARDEN_WA_NEEDED:
				val = SMCCC_RET_SUCCESS;
				break;
			case KVM_BP_HARDEN_NOT_REQUIRED:
				val = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_2:
			switch (kvm_arm_have_ssbd()) {
			case KVM_SSBD_FORCE_DISABLE:
			case KVM_SSBD_UNKNOWN:
				break;
			case KVM_SSBD_KERNEL:
				val = SMCCC_RET_SUCCESS;
				break;
			case KVM_SSBD_FORCE_ENABLE:
			case KVM_SSBD_MITIGATED:
				val = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		}
		break;
	default:
		return kvm_psci_call(vcpu);
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return 1;
}

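/*
 * Number of "firmware" pseudo-registers exposed to userspace: the PSCI
 * version and the two SMCCC workaround discovery registers.
 */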
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
	return 3;
}

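/*
 * Copy the IDs of the firmware pseudo-registers into the userspace buffer.
 * The number of IDs written must match kvm_arm_get_fw_num_regs().
 */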
int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices++))
		return -EFAULT;

	if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1, uindices++))
		return -EFAULT;

	if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++))
		return -EFAULT;

	return 0;
}

#define KVM_REG_FEATURE_LEVEL_WIDTH	4
#define KVM_REG_FEATURE_LEVEL_MASK	(BIT(KVM_REG_FEATURE_LEVEL_WIDTH) - 1)

/*
 * Convert the kernel's workaround state into an easy-to-compare number,
 * where a higher value means a better level of protection.
 */
static int get_kernel_wa_level(u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		switch (kvm_arm_harden_branch_predictor()) {
		case KVM_BP_HARDEN_UNKNOWN:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
		case KVM_BP_HARDEN_WA_NEEDED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
		case KVM_BP_HARDEN_NOT_REQUIRED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
		}
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		switch (kvm_arm_have_ssbd()) {
		case KVM_SSBD_FORCE_DISABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
		case KVM_SSBD_KERNEL:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL;
		case KVM_SSBD_FORCE_ENABLE:
		case KVM_SSBD_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
		case KVM_SSBD_UNKNOWN:
		default:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
		}
	}

	return -EINVAL;
}

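/*
 * Read one firmware pseudo-register on behalf of userspace
 * (KVM_GET_ONE_REG).
 */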
int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
		val = kvm_psci_version(vcpu, vcpu->kvm);
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;

		if (val == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
		    kvm_arm_get_vcpu_workaround_2_flag(vcpu))
			val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

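/*
 * Write one firmware pseudo-register on behalf of userspace
 * (KVM_SET_ONE_REG).  Userspace (e.g. during migration) may only request a
 * workaround level that is no better than what the kernel provides.
 */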
int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int wa_level;

	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
	{
		bool wants_02;

		wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);

		switch (val) {
		case KVM_ARM_PSCI_0_1:
			if (wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		case KVM_ARM_PSCI_0_2:
		case KVM_ARM_PSCI_1_0:
			if (!wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		}
		break;
	}

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
			return -EINVAL;

		if (get_kernel_wa_level(reg->id) < val)
			return -EINVAL;

		return 0;

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		if (val & ~(KVM_REG_FEATURE_LEVEL_MASK |
			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
			return -EINVAL;

		wa_level = val & KVM_REG_FEATURE_LEVEL_MASK;

		if (get_kernel_wa_level(reg->id) < wa_level)
			return -EINVAL;

		/* The enabled bit must not be set unless the level is AVAIL. */
		if (wa_level != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
		    wa_level != val)
			return -EINVAL;

		/* Are we finished, or do we need to check the enable bit? */
		if (kvm_arm_have_ssbd() != KVM_SSBD_KERNEL)
			return 0;

		/*
		 * This kernel supports switching the workaround on and off,
		 * so propagate the requested state to the vcpu's
		 * workaround flag.
		 */
		switch (wa_level) {
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
			kvm_arm_set_vcpu_workaround_2_flag(vcpu,
			    val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED);
			break;
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
			kvm_arm_set_vcpu_workaround_2_flag(vcpu, true);
			break;
		}

		return 0;
	default:
		return -ENOENT;
	}

	return -EINVAL;
}