// SPDX-License-Identifier: GPL-2.0
/*
 * AArch64 code
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#include <linux/compiler.h>
#include <assert.h>

#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"

#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000

static vm_vaddr_t exception_handlers;

/* Round v up to the next vm->page_size boundary. */
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
	return (v + vm->page_size - 1) & ~(vm->page_size - 1);
}

/*
 * Each translation table level resolves page_shift - 3 bits of the VA:
 * descriptors are 8 bytes, so a page-sized table holds
 * 2^(page_shift - 3) of them. The helpers below extract the per-level
 * table index from a guest virtual address.
 */
static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;

	return (gva >> shift) & mask;
}

static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels == 4,
		"Mode %d does not have 4 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels >= 3,
		"Mode %d does not have >= 3 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
	return (gva >> vm->page_shift) & mask;
}
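
/*
 * Worked example (an illustration only, not used by the code): with 4K
 * pages (page_shift = 12), 48 VA bits and 4 levels, each level resolves
 * 12 - 3 = 9 bits:
 *
 *	pte_index: VA bits 20:12 (shift 12)
 *	pmd_index: VA bits 29:21 (shift 21)
 *	pud_index: VA bits 38:30 (shift 30)
 *	pgd_index: VA bits 47:39 (shift 39 = 3 * 9 + 12)
 *
 * so every table, the pgd included, holds 512 entries.
 */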

/* Mask the output address out of a table or page descriptor. */
static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
	uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;
	return entry & mask;
}

static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	return 1 << (vm->va_bits - shift);
}

static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}

void virt_pgd_alloc(struct kvm_vm *vm)
{
	if (!vm->pgd_created) {
		vm_paddr_t paddr = vm_phy_pages_alloc(vm,
			page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
		vm->pgd = paddr;
		vm->pgd_created = true;
	}
}
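
/*
 * Sizing example (illustrative): for 4K pages, 48 VA bits and 4 levels,
 * ptrs_per_pgd() is 512, so the pgd occupies 512 * 8 = 4096 bytes and
 * virt_pgd_alloc() requests exactly one physical page.
 */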

static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
			 uint64_t flags)
{
	uint8_t attr_idx = flags & 7;	/* MAIR_EL1 attribute index, bits [2:0] */
	uint64_t *ptep;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(vaddr >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		paddr, vm->max_gfn, vm->page_size);

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
	if (!*ptep)
		*ptep = vm_alloc_page_table(vm) | 3;	/* valid table descriptor */

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = vm_alloc_page_table(vm) | 3;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = vm_alloc_page_table(vm) | 3;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	*ptep = paddr | 3;	/* valid page descriptor */
	*ptep |= (attr_idx << 2) | (1 << 10) /* AF */;
}
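
/*
 * Descriptor encoding, as assembled above (a summary, not exhaustive):
 * bits [1:0] = 0b11 mark a valid table/page descriptor, bits [4:2]
 * index into MAIR_EL1, and bit 10 is the Access Flag. For example,
 * mapping paddr 0x401000 with attr_idx 4 yields
 * 0x401000 | 3 | (4 << 2) | (1 << 10) = 0x401413.
 */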

void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */

	_virt_pg_map(vm, vaddr, paddr, attr_idx);
}
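
/*
 * Usage sketch (hypothetical addresses): back a guest virtual page with
 * freshly allocated guest physical memory:
 *
 *	vm_paddr_t gpa = vm_phy_page_alloc(vm, 0x10000, 0);
 *	virt_pg_map(vm, 0x400000, gpa);
 */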

/*
 * Walk the guest page tables in software to translate gva to a guest
 * physical address. A zero descriptor at any level means the address
 * is unmapped.
 */
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	if (!vm->pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
	if (!*ptep)
		goto unmapped_gva;

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(1);	/* not reached; TEST_FAIL() does not return */
}
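
/*
 * Example check (illustrative): after virt_pg_map(vm, gva, gpa), the
 * software walk must return the same gpa:
 *
 *	TEST_ASSERT(addr_gva2gpa(vm, gva) == gpa, "stale translation");
 */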

static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
#ifdef DEBUG
	static const char * const type[] = { "", "pud", "pmd", "pte" };
	uint64_t pte, *ptep;

	if (level == 4)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
	}
#endif
}

void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level = 4 - (vm->pgtable_levels - 1);
	uint64_t pgd, *ptep;

	if (!vm->pgd_created)
		return;

	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
	}
}

void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *init)
{
	struct kvm_vcpu_init default_init = { .target = -1, };
	uint64_t sctlr_el1, tcr_el1;

	if (!init)
		init = &default_init;

	if (init->target == -1) {
		struct kvm_vcpu_init preferred;
		vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
		init->target = preferred.target;
	}

	vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_INIT, init);

	/*
	 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
	 * registers, which the variable argument list macros do.
	 */
	set_reg(vm, vcpuid, ARM64_SYS_REG(CPACR_EL1), 3 << 20);

	get_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), &sctlr_el1);
	get_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), &tcr_el1);

	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with 52-bit physical address ranges");
	case VM_MODE_PXXV48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with ANY-bit physical address ranges");
	case VM_MODE_P52V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
		break;
	case VM_MODE_P48V48_4K:
		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
		tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
		break;
	case VM_MODE_P48V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
		break;
	case VM_MODE_P40V48_4K:
		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
		break;
	case VM_MODE_P40V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
	/* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */;
	tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
	tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;

	set_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), sctlr_el1);
	set_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), tcr_el1);
	set_reg(vm, vcpuid, ARM64_SYS_REG(MAIR_EL1), DEFAULT_MAIR_EL1);
	set_reg(vm, vcpuid, ARM64_SYS_REG(TTBR0_EL1), vm->pgd);
}
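
/*
 * Worked example (illustrative): for VM_MODE_P40V48_4K with 48 VA bits,
 * the function composes TCR_EL1 roughly as
 *
 *	(64 - 48)		T0SZ = 16, i.e. a 48-bit VA space
 *	| (1 << 8) | (1 << 10)	IRGN0/ORGN0 = Write-Back Write-Allocate
 *	| (3 << 12)		SH0 = Inner Shareable
 *	| (0ul << 14)		TG0 = 4KB granule
 *	| (2ul << 32)		IPS = 40-bit physical addresses
 *
 * on top of whatever KVM_ARM_VCPU_INIT left in the register.
 */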

void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
	uint64_t pstate, pc;

	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);

	fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
		indent, "", pstate, pc);
}

void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
			      struct kvm_vcpu_init *init, void *guest_code)
{
	size_t stack_size = vm->page_size == 4096 ?
					DEFAULT_STACK_PGS * vm->page_size :
					vm->page_size;
	uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
					      DEFAULT_ARM64_GUEST_STACK_VADDR_MIN);

	vm_vcpu_add(vm, vcpuid);
	aarch64_vcpu_setup(vm, vcpuid, init);

	set_reg(vm, vcpuid, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
	set_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
}
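
/*
 * Typical flow (sketch; vm_create_default() and vcpu_run() come from
 * the common selftest library):
 *
 *	struct kvm_vm *vm = vm_create_default(0, 0, guest_code);
 *	vcpu_run(vm, 0);
 */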

void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
	aarch64_vcpu_add_default(vm, vcpuid, NULL, guest_code);
}

void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
	va_list ap;
	int i;

	/* AAPCS64 passes the first eight arguments in x0-x7. */
	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "  num: %u\n", num);

	va_start(ap, num);

	for (i = 0; i < num; i++) {
		set_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[i]),
			va_arg(ap, uint64_t));
	}

	va_end(ap);
}
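
/*
 * Example (illustrative): a guest entry point taking two parameters,
 *
 *	static void guest_code(uint64_t a, uint64_t b);
 *
 * receives them in x0/x1 after
 *
 *	vcpu_args_set(vm, vcpuid, 2, a, b);
 */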

void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
{
	ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec);
	while (1)
		;
}

void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct ucall uc;

	if (get_ucall(vm, vcpuid, &uc) != UCALL_UNHANDLED)
		return;

	if (uc.args[2]) /* valid_ec */ {
		assert(VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)",
			  uc.args[0], uc.args[1]);
	} else {
		assert(!VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx)",
			  uc.args[0]);
	}
}

struct handlers {
	handler_fn exception_handlers[VECTOR_NUM][ESR_EC_NUM];
};

void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
{
	/* 'vectors' is the EL1 vector table defined in handlers.S. */
	extern char vectors;

	set_reg(vm, vcpuid, ARM64_SYS_REG(VBAR_EL1), (uint64_t)&vectors);
}

/*
 * Called from the assembly vector stubs: dispatch to any installed
 * handler, keyed on the vector and, for synchronous exceptions, the
 * ESR_EL1 exception class.
 */
void route_exception(struct ex_regs *regs, int vector)
{
	struct handlers *handlers = (struct handlers *)exception_handlers;
	bool valid_ec;
	int ec = 0;

	switch (vector) {
	case VECTOR_SYNC_CURRENT:
	case VECTOR_SYNC_LOWER_64:
		ec = (read_sysreg(esr_el1) >> ESR_EC_SHIFT) & ESR_EC_MASK;
		valid_ec = true;
		break;
	case VECTOR_IRQ_CURRENT:
	case VECTOR_IRQ_LOWER_64:
	case VECTOR_FIQ_CURRENT:
	case VECTOR_FIQ_LOWER_64:
	case VECTOR_ERROR_CURRENT:
	case VECTOR_ERROR_LOWER_64:
		ec = 0;
		valid_ec = false;
		break;
	default:
		valid_ec = false;
		goto unexpected_exception;
	}

	if (handlers && handlers->exception_handlers[vector][ec])
		return handlers->exception_handlers[vector][ec](regs);

unexpected_exception:
	kvm_exit_unexpected_exception(vector, ec, valid_ec);
}

void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	vm->handlers = vm_vaddr_alloc(vm, sizeof(struct handlers),
			vm->page_size);

	/* Point the guest's exception_handlers pointer at the table. */
	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}

void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
			     void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	assert(ec < ESR_EC_NUM);
	handlers->exception_handlers[vector][ec] = handler;
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
				  void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(!VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	handlers->exception_handlers[vector][0] = handler;
}
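
/*
 * Usage sketch: a test installs a handler before running the vcpu.
 * ESR_EC_BRK_INS below stands for whatever EC constant processor.h
 * defines for the exception class of interest:
 *
 *	vm_init_descriptor_tables(vm);
 *	vcpu_init_descriptor_tables(vm, vcpuid);
 *	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
 *				ESR_EC_BRK_INS, guest_brk_handler);
 */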