1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/preempt.h>
19#include <linux/kvm_host.h>
20#include <linux/wait.h>
21
22#include <asm/cputype.h>
23#include <asm/kvm_emulate.h>
24#include <asm/kvm_psci.h>
25#include <asm/kvm_host.h>
26
27#include <uapi/linux/psci.h>
28
29
30
31
32
33
34#define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
35
36static unsigned long psci_affinity_mask(unsigned long affinity_level)
37{
38 if (affinity_level <= 3)
39 return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);
40
41 return 0;
42}
43
/*
 * PSCI CPU_SUSPEND emulation.
 *
 * Suspend is emulated as the equivalent of a WFI: the vCPU thread simply
 * blocks until there is a reason for it to run again, then the call
 * completes with PSCI_RET_SUCCESS.
 *
 * NOTE(review): the requested power_state argument (in r1/x1) is never
 * inspected here, so every state is treated as a shallow, caller-
 * transparent standby — presumably intentional, but confirm against the
 * PSCI CPU_SUSPEND requirements.
 */
static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_block(vcpu);

	return PSCI_RET_SUCCESS;
}
63
/* PSCI CPU_OFF emulation: mark the calling vCPU as powered off. */
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
}
68
/*
 * PSCI CPU_ON emulation.
 *
 * The caller passes the target MPIDR in r1/x1, the entry point in r2/x2
 * and a context ID in r3/x3.  The target vCPU is reset, its PC set to the
 * entry point, the context ID placed in its r0/x0, and it is then marked
 * powered on and woken up.
 *
 * Returns PSCI_RET_SUCCESS, PSCI_RET_INVALID_PARAMS for an unknown MPIDR
 * (or, under PSCI 0.1, an already-running target), or PSCI_RET_ALREADY_ON
 * under PSCI 0.2+ when the target is already running.
 */
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL;
	struct swait_queue_head *wq;
	unsigned long cpu_id;
	unsigned long context_id;
	phys_addr_t target_pc;

	cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
	if (vcpu_mode_is_32bit(source_vcpu))
		/* A 32-bit caller only provides 32 valid bits in r1. */
		cpu_id &= ~((u32) 0);

	vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

	/*
	 * Make sure the caller requested a valid vCPU and that it is not
	 * already on.  PSCI 0.2 introduced the ALREADY_ON error code; the
	 * 0.1 interface predates it, so report INVALID_PARAMS instead.
	 */
	if (!vcpu)
		return PSCI_RET_INVALID_PARAMS;
	if (!vcpu->arch.power_off) {
		if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
			return PSCI_RET_ALREADY_ON;
		else
			return PSCI_RET_INVALID_PARAMS;
	}

	target_pc = vcpu_get_reg(source_vcpu, 2);
	context_id = vcpu_get_reg(source_vcpu, 3);

	kvm_reset_vcpu(vcpu);

	/* Gracefully handle a Thumb2 entry point: bit 0 selects Thumb mode. */
	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
		target_pc &= ~((phys_addr_t) 1);
		vcpu_set_thumb(vcpu);
	}

	/* Propagate the caller's endianness to the target. */
	if (kvm_vcpu_is_be(source_vcpu))
		kvm_vcpu_set_be(vcpu);

	*vcpu_pc(vcpu) = target_pc;
	/*
	 * NOTE: we always update r0 (or x0) with the context ID, even for
	 * PSCI 0.1 — the GP registers are undefined upon CPU_ON there, so
	 * this is harmless for 0.1 and required for 0.2.
	 */
	vcpu_set_reg(vcpu, 0, context_id);
	vcpu->arch.power_off = false;
	/* Order the state/register updates above before the wake-up below. */
	smp_mb();

	wq = kvm_arch_vcpu_wq(vcpu);
	swake_up(wq);

	return PSCI_RET_SUCCESS;
}
126
127static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
128{
129 int i, matching_cpus = 0;
130 unsigned long mpidr;
131 unsigned long target_affinity;
132 unsigned long target_affinity_mask;
133 unsigned long lowest_affinity_level;
134 struct kvm *kvm = vcpu->kvm;
135 struct kvm_vcpu *tmp;
136
137 target_affinity = vcpu_get_reg(vcpu, 1);
138 lowest_affinity_level = vcpu_get_reg(vcpu, 2);
139
140
141 target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
142 if (!target_affinity_mask)
143 return PSCI_RET_INVALID_PARAMS;
144
145
146 target_affinity &= target_affinity_mask;
147
148
149
150
151
152 kvm_for_each_vcpu(i, tmp, kvm) {
153 mpidr = kvm_vcpu_get_mpidr_aff(tmp);
154 if ((mpidr & target_affinity_mask) == target_affinity) {
155 matching_cpus++;
156 if (!tmp->arch.power_off)
157 return PSCI_0_2_AFFINITY_LEVEL_ON;
158 }
159 }
160
161 if (!matching_cpus)
162 return PSCI_RET_INVALID_PARAMS;
163
164 return PSCI_0_2_AFFINITY_LEVEL_OFF;
165}
166
/*
 * Flag a system-wide event (shutdown/reset) for delivery to userspace.
 *
 * @vcpu: the vCPU that made the PSCI call; its run struct carries the exit.
 * @type: KVM_SYSTEM_EVENT_* value to report.
 */
static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
	int i;
	struct kvm_vcpu *tmp;

	/*
	 * After a system event exit, userspace may call KVM_RUN again and
	 * perform the actual shutdown/reboot at a later time.  A PSCI
	 * caller, however, expects the system to stop immediately, so power
	 * off every vCPU (and kick it out of the guest) before reporting
	 * the event — no vCPU can then run until it has been explicitly
	 * re-initialized.
	 */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		tmp->arch.power_off = true;
		kvm_vcpu_kick(tmp);
	}

	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
	vcpu->run->system_event.type = type;
	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
190
/* PSCI SYSTEM_OFF: report a shutdown event to userspace. */
static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}
195
/* PSCI SYSTEM_RESET: report a reset event to userspace. */
static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}
200
201int kvm_psci_version(struct kvm_vcpu *vcpu)
202{
203 if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
204 return KVM_ARM_PSCI_0_2;
205
206 return KVM_ARM_PSCI_0_1;
207}
208
/*
 * Dispatch a PSCI 0.2 function call.
 *
 * The function ID is taken from r0/x0 (masked down to 32 bits), the
 * result is written back to r0/x0.  Returns 1 to resume the guest, or 0
 * to exit to userspace (system off/reset).
 */
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
	int ret = 1;
	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
	unsigned long val;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		/*
		 * Version 0.2: per PSCI 0.2 the return encodes the major
		 * version in bits [31:16] (here 0) and the minor version
		 * in bits [15:0] (here 2).
		 */
		val = 2;
		break;
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_SUSPEND:
		val = kvm_psci_vcpu_suspend(vcpu);
		break;
	case PSCI_0_2_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case PSCI_0_2_FN_CPU_ON:
	case PSCI_0_2_FN64_CPU_ON:
		val = kvm_psci_vcpu_on(vcpu);
		break;
	case PSCI_0_2_FN_AFFINITY_INFO:
	case PSCI_0_2_FN64_AFFINITY_INFO:
		val = kvm_psci_vcpu_affinity_info(vcpu);
		break;
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		/*
		 * Report the Trusted OS as multicore-capable (or absent),
		 * i.e. it never requires migration, so the MIGRATE call
		 * does not need to be implemented.
		 */
		val = PSCI_0_2_TOS_MP;
		break;
	case PSCI_0_2_FN_SYSTEM_OFF:
		kvm_psci_system_off(vcpu);
		/*
		 * We should not be going back into the guest after a
		 * SYSTEM_OFF request: ret = 0 forces an exit to userspace,
		 * and if userspace nevertheless resumes the vCPU, leaving
		 * PSCI_RET_INTERNAL_FAILURE in r0 makes the guest observe
		 * the call as failed rather than silently ignored.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	case PSCI_0_2_FN_SYSTEM_RESET:
		kvm_psci_system_reset(vcpu);
		/*
		 * Same rationale as for SYSTEM_OFF: exit to userspace and
		 * report failure if the vCPU is ever resumed regardless.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	vcpu_set_reg(vcpu, 0, val);
	return ret;
}
279
280static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
281{
282 unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
283 unsigned long val;
284
285 switch (psci_fn) {
286 case KVM_PSCI_FN_CPU_OFF:
287 kvm_psci_vcpu_off(vcpu);
288 val = PSCI_RET_SUCCESS;
289 break;
290 case KVM_PSCI_FN_CPU_ON:
291 val = kvm_psci_vcpu_on(vcpu);
292 break;
293 default:
294 val = PSCI_RET_NOT_SUPPORTED;
295 break;
296 }
297
298 vcpu_set_reg(vcpu, 0, val);
299 return 1;
300}
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316int kvm_psci_call(struct kvm_vcpu *vcpu)
317{
318 switch (kvm_psci_version(vcpu)) {
319 case KVM_ARM_PSCI_0_2:
320 return kvm_psci_0_2_call(vcpu);
321 case KVM_ARM_PSCI_0_1:
322 return kvm_psci_0_1_call(vcpu);
323 default:
324 return -EINVAL;
325 };
326}
327