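/*
 * KVM selftest: save and restore vCPU state with the KVM_GET/SET_* ioctls at
 * every guest sync point, including nested SVM/VMX state when supported.
 */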
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include "svm_util.h"

#define VCPU_ID			5
#define L2_GUEST_STACK_SIZE	256

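/*
 * L2 guest (SVM): sync with the host at stages 4 and 6, exiting back to L1
 * with vmcall after each stage.
 */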
void svm_l2_guest_code(void)
{
	GUEST_SYNC(4);

	vmcall();
	GUEST_SYNC(6);

	vmcall();
}

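/*
 * L1 guest (SVM): run svm_l2_guest_code twice, checking that each exit back
 * to L1 is a VMMCALL intercept, and sync with the host at stages 3, 5 and 7.
 */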
static void svm_l1_guest_code(struct svm_test_data *svm)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	struct vmcb *vmcb = svm->vmcb;

	GUEST_ASSERT(svm->vmcb_gpa);

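	/* Prepare for L2 execution. */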
	generic_svm_setup(svm, svm_l2_guest_code,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(3);
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	GUEST_SYNC(5);
	vmcb->save.rip += 3;
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	GUEST_SYNC(7);
}

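/*
 * L2 guest (VMX): sync at stage 6, exit to L1 with vmcall, and later read and
 * write GUEST_RIP through the shadow VMCS that L1 links in (stages 10-12).
 */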
void vmx_l2_guest_code(void)
{
	GUEST_SYNC(6);

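	/* Exit to L1 */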
	vmcall();

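	/* L1 has now set up a shadow VMCS for us. */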
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_SYNC(10);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
	GUEST_SYNC(11);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
	GUEST_SYNC(12);

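	/* Done, exit to L1 and never come back. */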
	vmcall();
}

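/*
 * L1 guest (VMX): enter VMX operation, launch L2, then set up a shadow VMCS
 * and alternate between the ordinary and shadow VMCS while syncing with the
 * host, so each host-side save/restore must preserve the launched state, the
 * current-VMCS pointer and the shadow VMCS contents.
 */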
static void vmx_l1_guest_code(struct vmx_pages *vmx_pages)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_SYNC(3);
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	GUEST_SYNC(4);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, vmx_l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(5);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

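	/* Check that the launched state is preserved. */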
	GUEST_ASSERT(vmlaunch());

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_SYNC(7);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + 3);

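	/* Enable VMCS shadowing and link in the shadow VMCS. */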
	vmwrite(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
	vmwrite(VMCS_LINK_POINTER, vmx_pages->shadow_vmcs_gpa);

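	/* The shadow VMCS itself can be loaded but never launched or resumed. */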
	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmlaunch());
	GUEST_SYNC(8);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());

	vmwrite(GUEST_RIP, 0xc0ffee);
	GUEST_SYNC(9);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);

	GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
	GUEST_SYNC(13);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
}

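/*
 * Top-level guest: each GUEST_SYNC(n) hands control to the host, which saves
 * the vCPU state, destroys the VM and restores the state into a fresh vCPU
 * before resuming, so the nested stages above are all run across a
 * save/restore boundary.
 */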
static void __attribute__((__flatten__)) guest_code(void *arg)
{
	GUEST_SYNC(1);
	GUEST_SYNC(2);

	if (arg) {
		if (cpu_has_svm())
			svm_l1_guest_code(arg);
		else
			vmx_l1_guest_code(arg);
	}

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	vm_vaddr_t nested_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;

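	/* Create VM */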
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	run = vcpu_state(vm, VCPU_ID);

	vcpu_regs_get(vm, VCPU_ID, &regs1);

	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		if (nested_svm_supported())
			vcpu_alloc_svm(vm, &nested_gva);
		else if (nested_vmx_supported())
			vcpu_alloc_vmx(vm, &nested_gva);
	}

	if (!nested_gva)
		pr_info("will skip nested state checks\n");

	vcpu_args_set(vm, VCPU_ID, 1, nested_gva);

	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s),\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
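			/* NOT REACHED */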
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

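		/* UCALL_SYNC is handled here: the guest passed ("hello", stage). */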
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
			    stage, (ulong)uc.args[1]);

		state = vcpu_save_state(vm, VCPU_ID);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vm, VCPU_ID, &regs1);

		kvm_vm_release(vm);

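		/* Restore state in a new VM. */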
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);
		free(state);

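		/* The GPRs must survive the save/load round trip unchanged. */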
		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vm, VCPU_ID, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}

done:
	kvm_vm_free(vm);
}