1
2
3
4
5
6
7
8
9
10#include "test_util.h"
11#include "kvm_util.h"
12#include "processor.h"
13#include "vmx.h"
14
15#include <errno.h>
16#include <linux/kvm.h>
17#include <string.h>
18#include <sys/ioctl.h>
19#include <unistd.h>
20
21
22
23
24
25#define VMCS12_REVISION 0x11e57ed0
26#define VCPU_ID 5
27
28bool have_evmcs;
29
/*
 * Set @state on VCPU_ID, expecting KVM_SET_NESTED_STATE to succeed.
 * The 'false' argument disables error-ignoring in the helper — presumably
 * the helper asserts success itself (contrast with the expect_errno
 * variant below, which passes 'true' and inspects the return value).
 */
void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
{
	vcpu_nested_state_set(vm, VCPU_ID, state, false);
}
34
35void test_nested_state_expect_errno(struct kvm_vm *vm,
36 struct kvm_nested_state *state,
37 int expected_errno)
38{
39 int rv;
40
41 rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
42 TEST_ASSERT(rv == -1 && errno == expected_errno,
43 "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
44 strerror(expected_errno), expected_errno, rv, strerror(errno),
45 errno);
46}
47
/* Convenience wrapper: @state must be rejected with EINVAL. */
void test_nested_state_expect_einval(struct kvm_vm *vm,
				    struct kvm_nested_state *state)
{
	test_nested_state_expect_errno(vm, state, EINVAL);
}
53
/* Convenience wrapper: @state must be rejected with EFAULT. */
void test_nested_state_expect_efault(struct kvm_vm *vm,
				    struct kvm_nested_state *state)
{
	test_nested_state_expect_errno(vm, state, EFAULT);
}
59
60void set_revision_id_for_vmcs12(struct kvm_nested_state *state,
61 u32 vmcs12_revision)
62{
63
64 memcpy(&state->data, &vmcs12_revision, sizeof(u32));
65}
66
67void set_default_state(struct kvm_nested_state *state)
68{
69 memset(state, 0, sizeof(*state));
70 state->flags = KVM_STATE_NESTED_RUN_PENDING |
71 KVM_STATE_NESTED_GUEST_MODE;
72 state->format = 0;
73 state->size = sizeof(*state);
74}
75
76void set_default_vmx_state(struct kvm_nested_state *state, int size)
77{
78 memset(state, 0, size);
79 if (have_evmcs)
80 state->flags = KVM_STATE_NESTED_EVMCS;
81 state->format = 0;
82 state->size = size;
83 state->hdr.vmx.vmxon_pa = 0x1000;
84 state->hdr.vmx.vmcs12_pa = 0x2000;
85 state->hdr.vmx.smm.flags = 0;
86 set_revision_id_for_vmcs12(state, VMCS12_REVISION);
87}
88
89void test_vmx_nested_state(struct kvm_vm *vm)
90{
91
92 const int state_sz = sizeof(struct kvm_nested_state) + getpagesize();
93 struct kvm_nested_state *state =
94 (struct kvm_nested_state *)malloc(state_sz);
95
96
97 set_default_vmx_state(state, state_sz);
98 state->format = 1;
99 test_nested_state_expect_einval(vm, state);
100
101
102
103
104
105 set_default_vmx_state(state, state_sz);
106 test_nested_state_expect_einval(vm, state);
107
108
109
110
111
112
113 set_default_vmx_state(state, state_sz);
114 state->hdr.vmx.vmxon_pa = -1ull;
115 test_nested_state_expect_einval(vm, state);
116
117 state->hdr.vmx.vmcs12_pa = -1ull;
118 state->flags = KVM_STATE_NESTED_EVMCS;
119 test_nested_state_expect_einval(vm, state);
120
121 state->flags = 0;
122 test_nested_state(vm, state);
123
124
125 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
126
127
128
129
130
131
132
133 set_default_vmx_state(state, state_sz);
134 state->hdr.vmx.vmxon_pa = -1ull;
135 state->hdr.vmx.vmcs12_pa = -1ull;
136 test_nested_state_expect_einval(vm, state);
137
138 state->flags &= KVM_STATE_NESTED_EVMCS;
139 if (have_evmcs) {
140 test_nested_state_expect_einval(vm, state);
141 vcpu_enable_evmcs(vm, VCPU_ID);
142 }
143 test_nested_state(vm, state);
144
145
146 state->hdr.vmx.smm.flags = 1;
147 test_nested_state_expect_einval(vm, state);
148
149
150 set_default_vmx_state(state, state_sz);
151 state->hdr.vmx.flags = ~0;
152 test_nested_state_expect_einval(vm, state);
153
154
155 set_default_vmx_state(state, state_sz);
156 state->hdr.vmx.vmxon_pa = -1ull;
157 state->flags = 0;
158 test_nested_state_expect_einval(vm, state);
159
160
161 set_default_vmx_state(state, state_sz);
162 state->hdr.vmx.vmxon_pa = 1;
163 test_nested_state_expect_einval(vm, state);
164
165
166
167
168
169 set_default_vmx_state(state, state_sz);
170 state->flags = KVM_STATE_NESTED_GUEST_MODE |
171 KVM_STATE_NESTED_RUN_PENDING;
172 state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
173 test_nested_state_expect_einval(vm, state);
174
175
176
177
178
179
180 set_default_vmx_state(state, state_sz);
181 state->hdr.vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
182 KVM_STATE_NESTED_SMM_VMXON);
183 test_nested_state_expect_einval(vm, state);
184
185
186 set_default_vmx_state(state, state_sz);
187 state->flags = 0;
188 state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
189 test_nested_state_expect_einval(vm, state);
190
191
192
193
194
195 set_default_vmx_state(state, state_sz);
196 state->size = sizeof(*state);
197 state->flags = 0;
198 test_nested_state_expect_einval(vm, state);
199
200 set_default_vmx_state(state, state_sz);
201 state->size = sizeof(*state);
202 state->flags = 0;
203 state->hdr.vmx.vmcs12_pa = -1;
204 test_nested_state(vm, state);
205
206
207
208
209
210 set_default_vmx_state(state, state_sz);
211 state->flags = 0;
212 test_nested_state(vm, state);
213
214
215 set_default_vmx_state(state, state_sz);
216 state->size = sizeof(*state);
217 state->flags = 0;
218 state->hdr.vmx.vmcs12_pa = -1;
219 state->hdr.vmx.flags = ~0;
220 test_nested_state_expect_einval(vm, state);
221
222
223 set_default_vmx_state(state, state_sz);
224 state->hdr.vmx.vmxon_pa = 0;
225 state->hdr.vmx.vmcs12_pa = 0;
226 test_nested_state_expect_einval(vm, state);
227
228
229
230
231
232 set_default_vmx_state(state, state_sz);
233 state->hdr.vmx.vmxon_pa = -1ull;
234 state->hdr.vmx.vmcs12_pa = -1ull;
235 state->flags = 0;
236 test_nested_state(vm, state);
237 vcpu_nested_state_get(vm, VCPU_ID, state);
238 TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz,
239 "Size must be between %ld and %d. The size returned was %d.",
240 sizeof(*state), state_sz, state->size);
241 TEST_ASSERT(state->hdr.vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
242 TEST_ASSERT(state->hdr.vmx.vmcs12_pa == -1ull, "vmcs_pa must be -1ull.");
243
244 free(state);
245}
246
247void disable_vmx(struct kvm_vm *vm)
248{
249 struct kvm_cpuid2 *cpuid = kvm_get_supported_cpuid();
250 int i;
251
252 for (i = 0; i < cpuid->nent; ++i)
253 if (cpuid->entries[i].function == 1 &&
254 cpuid->entries[i].index == 0)
255 break;
256 TEST_ASSERT(i != cpuid->nent, "CPUID function 1 not found");
257
258 cpuid->entries[i].ecx &= ~CPUID_VMX;
259 vcpu_set_cpuid(vm, VCPU_ID, cpuid);
260 cpuid->entries[i].ecx |= CPUID_VMX;
261}
262
int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_nested_state state;

	have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);

	if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		print_skip("KVM_CAP_NESTED_STATE not available");
		exit(KSFT_SKIP);
	}

	/*
	 * Skip (rather than fail) when nested VMX is unavailable —
	 * presumably this helper exits with KSFT_SKIP itself; verify
	 * against its definition in the vmx library.
	 */
	nested_vmx_check_supported();

	vm = vm_create_default(VCPU_ID, 0, 0);

	/*
	 * First run the generic (non-VMX-format) error cases with VMX
	 * disabled in the guest CPUID.
	 */
	disable_vmx(vm);

	/* Passing a NULL kvm_nested_state causes an EFAULT. */
	test_nested_state_expect_efault(vm, NULL);

	/* 'size' cannot be smaller than sizeof(kvm_nested_state). */
	set_default_state(&state);
	state.size = 0;
	test_nested_state_expect_einval(vm, &state);

	/*
	 * Flag value 0xf is expected to be rejected: only the
	 * KVM_STATE_NESTED_* flag bits used elsewhere in this test
	 * (GUEST_MODE, RUN_PENDING, EVMCS) are meaningful here.
	 */
	set_default_state(&state);
	state.flags = 0xf;
	test_nested_state_expect_einval(vm, &state);

	/*
	 * KVM_STATE_NESTED_RUN_PENDING on its own (without GUEST_MODE,
	 * which set_default_state() normally also sets) is rejected.
	 */
	set_default_state(&state);
	state.flags = KVM_STATE_NESTED_RUN_PENDING;
	test_nested_state_expect_einval(vm, &state);

	/* VMX-format-specific cases (re-enables VMX in the CPUID). */
	test_vmx_nested_state(vm);

	kvm_vm_free(vm);
	return 0;
}
320