1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20#include "trace.h"
21#include "exec/log.h"
22#include "qemu.h"
23#include "user-internals.h"
24#include "user-mmap.h"
25
26static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
27static __thread int mmap_lock_count;
28
29void mmap_lock(void)
30{
31 if (mmap_lock_count++ == 0) {
32 pthread_mutex_lock(&mmap_mutex);
33 }
34}
35
36void mmap_unlock(void)
37{
38 if (--mmap_lock_count == 0) {
39 pthread_mutex_unlock(&mmap_mutex);
40 }
41}
42
43bool have_mmap_lock(void)
44{
45 return mmap_lock_count > 0 ? true : false;
46}
47
48
49void mmap_fork_start(void)
50{
51 if (mmap_lock_count)
52 abort();
53 pthread_mutex_lock(&mmap_mutex);
54}
55
56void mmap_fork_end(int child)
57{
58 if (child)
59 pthread_mutex_init(&mmap_mutex, NULL);
60 else
61 pthread_mutex_unlock(&mmap_mutex);
62}
63
64
65
66
67
68
69
/*
 * Validate target prot bitmask.
 * Return the prot bitmask for the host in *HOST_PROT.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int *host_prot, int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

    /*
     * For the host, we need not pass anything except read/write/exec.
     * While PROT_SEM is allowed by all hosts, it is also ignored, so
     * don't bother transforming guest bit to host bit.  Any other
     * target-private prot bits will not be understood by the host
     * and will need to be encoded into page_flags for qemu emulation.
     *
     * Pages that are executable by the guest will never be executed
     * by the host, but the host will need to be able to read them.
     */
    *host_prot = (prot & (PROT_READ | PROT_WRITE))
                 | (prot & PROT_EXEC ? PROT_READ : 0);

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the
         * feature.  Since this is the unusual case, don't bother
         * checking unless the bit has been requested.  If set and valid,
         * record the bit within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit.  */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#endif

    /* Reject any bits outside the accepted set.  */
    return prot & ~valid ? 0 : page_flags;
}
112
113
/*
 * Guest analogue of mprotect(2) for [start, start+len).
 * Because a host page may contain several target pages, the protection
 * applied to a partially-covered host page is the OR of the requested
 * protection and that of the other target pages sharing the host page.
 * Returns 0 or a negative target errno.
 */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret, page_flags, host_prot;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = host_prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        /* If the range fits within one host page, fold the trailing
           partial page into this same pass.  */
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        /* handle host page containing end */
        prot1 = host_prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start),
                       host_end - host_start, host_prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len, page_flags);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
187
188
/*
 * Map an incomplete host page: [start, end) is the target sub-range of
 * the host page beginning at real_start.  Returns 0 on success, -1 on
 * failure.
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to write the file data */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1)
            return -1;

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        /* anonymous pages of the fragment must read back as zero */
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}
246
/*
 * Base guest address for mappings when the guest does not request a
 * specific address, chosen per host/target word size to stay clear of
 * the loaded guest image.
 */
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
#ifdef TARGET_AARCH64
# define TASK_UNMAPPED_BASE 0x5500000000
#else
# define TASK_UNMAPPED_BASE (1ul << 38)
#endif
#else
# define TASK_UNMAPPED_BASE 0x40000000
#endif
/* Next candidate address for an address-less mmap; advanced on success. */
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

/* NOTE(review): not referenced in this chunk; presumably maintained by
   the brk emulation elsewhere — confirm before relying on it. */
unsigned long last_brk;
259
260
261
/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated
 * a chunk of guest address space (reserved_va): search QEMU's own
 * page flags instead of probing the host kernel.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space.  */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use.  */
    addr = end_addr;
    while (1) {
        addr -= incr;
        if (addr > end_addr) {
            /* addr wrapped below zero (unsigned underflow).  */
            if (looped) {
                /* Failure.  The entire address space has been searched.  */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space.  */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use.  Restart below this page.  */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success!  All pages between ADDR and END_ADDR are free.  */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}
309
310
311
312
313
314
315
/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping around.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
422
423
/*
 * Guest analogue of mmap(2).
 * NOTE: all the flag/prot constants here are the HOST ones.
 * Returns the guest address of the mapping, or -1 with errno set.
 */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    int page_flags, host_prot;

    mmap_lock();
    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    /*
     * If we're mapping shared memory, ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (flags & MAP_SHARED) {
        CPUState *cpu = thread_cpu;
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmaping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K. But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page size than the host, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */
    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat (fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at eof aligned with
             * the hosts real pagesize. Additional anonymous maps
             * will be created beyond EOF.
             */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /*
         * Note: we prefer to control the mapping address. It is
         * especially important if qemu_host_page_size >
         * qemu_real_host_page_size.
         */
        p = mmap(g2h_untagged(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h_untagged(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination host mmap() handles
         * this error correctly.
         */
        if (end < start || !guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (host_prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h_untagged(start), len, offset) == -1)
                goto fail;
            if (!(host_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                host_prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            host_prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            host_prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     host_prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    page_set_flags(start, start + len, page_flags);
 the_end:
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        log_page_dump(__func__);
    }
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
642
/*
 * With reserved_va in effect, "unmapping" means re-covering the range
 * with PROT_NONE reservation pages so the guest address space remains
 * claimed.  Host pages partially shared with live target pages are
 * left untouched.
 */
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        /* other target pages still live in this host page: keep it */
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}
683
/*
 * Guest analogue of munmap(2).  Host pages only partially covered by
 * the range stay mapped if other target pages in them are still live.
 * Returns 0 or a negative target errno.
 */
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        /* other target pages still live in this host page: keep it */
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}
744
745abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
746 abi_ulong new_size, unsigned long flags,
747 abi_ulong new_addr)
748{
749 int prot;
750 void *host_addr;
751
752 if (!guest_range_valid_untagged(old_addr, old_size) ||
753 ((flags & MREMAP_FIXED) &&
754 !guest_range_valid_untagged(new_addr, new_size)) ||
755 ((flags & MREMAP_MAYMOVE) == 0 &&
756 !guest_range_valid_untagged(old_addr, new_size))) {
757 errno = ENOMEM;
758 return -1;
759 }
760
761 mmap_lock();
762
763 if (flags & MREMAP_FIXED) {
764 host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
765 flags, g2h_untagged(new_addr));
766
767 if (reserved_va && host_addr != MAP_FAILED) {
768
769
770 mmap_reserve(old_addr, old_size);
771 }
772 } else if (flags & MREMAP_MAYMOVE) {
773 abi_ulong mmap_start;
774
775 mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);
776
777 if (mmap_start == -1) {
778 errno = ENOMEM;
779 host_addr = MAP_FAILED;
780 } else {
781 host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
782 flags | MREMAP_FIXED,
783 g2h_untagged(mmap_start));
784 if (reserved_va) {
785 mmap_reserve(old_addr, old_size);
786 }
787 }
788 } else {
789 int prot = 0;
790 if (reserved_va && old_size < new_size) {
791 abi_ulong addr;
792 for (addr = old_addr + old_size;
793 addr < old_addr + new_size;
794 addr++) {
795 prot |= page_get_flags(addr);
796 }
797 }
798 if (prot == 0) {
799 host_addr = mremap(g2h_untagged(old_addr),
800 old_size, new_size, flags);
801
802 if (host_addr != MAP_FAILED) {
803
804 if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
805
806 host_addr = mremap(g2h_untagged(old_addr),
807 new_size, old_size, flags);
808 errno = ENOMEM;
809 host_addr = MAP_FAILED;
810 } else if (reserved_va && old_size > new_size) {
811 mmap_reserve(old_addr + old_size, old_size - new_size);
812 }
813 }
814 } else {
815 errno = ENOMEM;
816 host_addr = MAP_FAILED;
817 }
818 }
819
820 if (host_addr == MAP_FAILED) {
821 new_addr = -1;
822 } else {
823 new_addr = h2g(host_addr);
824 prot = page_get_flags(old_addr);
825 page_set_flags(old_addr, old_addr + old_size, 0);
826 page_set_flags(new_addr, new_addr + new_size,
827 prot | PAGE_VALID | PAGE_RESET);
828 }
829 tb_invalidate_phys_range(new_addr, new_addr + new_size);
830 mmap_unlock();
831 return new_addr;
832}
833