1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20#include "trace.h"
21#include "exec/log.h"
22#include "qemu.h"
23#include "user-internals.h"
24#include "user-mmap.h"
25
26static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
27static __thread int mmap_lock_count;
28
29void mmap_lock(void)
30{
31 if (mmap_lock_count++ == 0) {
32 pthread_mutex_lock(&mmap_mutex);
33 }
34}
35
36void mmap_unlock(void)
37{
38 if (--mmap_lock_count == 0) {
39 pthread_mutex_unlock(&mmap_mutex);
40 }
41}
42
43bool have_mmap_lock(void)
44{
45 return mmap_lock_count > 0 ? true : false;
46}
47
48
49void mmap_fork_start(void)
50{
51 if (mmap_lock_count)
52 abort();
53 pthread_mutex_lock(&mmap_mutex);
54}
55
56void mmap_fork_end(int child)
57{
58 if (child)
59 pthread_mutex_init(&mmap_mutex, NULL);
60 else
61 pthread_mutex_unlock(&mmap_mutex);
62}
63
64
65
66
67
68
69
/*
 * Validate target prot bitmask.
 * Return the prot bitmask for the host in *HOST_PROT.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int *host_prot, int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

    /*
     * For the host, we need not pass anything except read/write/exec.
     * A guest page marked PROT_EXEC gets PROT_READ on the host instead:
     * the host never executes guest pages directly, but QEMU must be
     * able to read them to translate.  Any target-private bits in
     * 'prot' are deliberately not forwarded to the host.
     */
    *host_prot = (prot & (PROT_READ | PROT_WRITE))
               | (prot & PROT_EXEC ? PROT_READ : 0);

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the
         * feature.  Since this is the unusual case, don't bother
         * checking unless the bit has been requested.  If set and valid,
         * record the bit within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#endif

    /* Reject any bit outside the accepted set. */
    return prot & ~valid ? 0 : page_flags;
}
112
113
/*
 * Change protection of guest range [start, start+len).
 *
 * When the target page size is smaller than the host page size, a host
 * page at either boundary may contain target pages outside the requested
 * range; the host protection for such a boundary page is the union of the
 * requested protection and the existing flags of the other target pages,
 * so those neighbours keep working.
 *
 * Returns 0 on success, -TARGET_EINVAL/-TARGET_ENOMEM on bad arguments,
 * or the host mprotect() result on failure.
 */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret, page_flags, host_prot;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* Handle the host page containing 'start': fold in the flags of
           the target pages below 'start' that share this host page. */
        prot1 = host_prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            /* The range fits in one host page; fold in the tail too. */
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        /* Handle the host page containing 'end' the same way. */
        prot1 = host_prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* Handle the pages in the middle (fully covered host pages). */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start),
                       host_end - host_start, host_prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len, page_flags);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
187
188
/*
 * Map an incomplete host page: [start, end) lies strictly inside the
 * single host page that begins at real_start.  Other target pages in the
 * same host page may already be mapped and must keep their contents and
 * protections.  Returns 0 on success, -1 on failure.
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* Get the protection of the target pages outside the mapping. */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* No page was there, so we allocate one. */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping. */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* Adjust protection so the page can be written below. */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* Read the corresponding file data into the fragment. */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1)
            return -1;

        /* Put the final protection in place. */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            /* Anonymous mappings must read back as zeroes. */
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}
246
/*
 * Default lower bound for placing guest mappings when the guest gives no
 * address hint; used by mmap_find_vma to seed and advance mmap_next_start.
 */
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
#ifdef TARGET_AARCH64
# define TASK_UNMAPPED_BASE  0x5500000000
#else
# define TASK_UNMAPPED_BASE  (1ul << 38)
#endif
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
/* Next address to try for a no-hint mapping; advanced as areas are found. */
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;
259
260
261
/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated
 * a chunk of guest address space (reserved_va).  Searches downward
 * for a free, aligned range of 'size' bytes; returns the address or
 * (abi_ulong)-1 on failure.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space. */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use. */
    addr = end_addr;
    while (1) {
        addr -= incr;
        if (addr > end_addr) {
            /* Unsigned wrap-around below zero. */
            if (looped) {
                /* Failure: the entire address space has been searched. */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space. */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use: restart the search below this page. */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success: all pages between ADDR and END_ADDR are free. */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}
309
310
311
312
313
314
315
/*
 * Find and reserve a free memory area of size 'size'.  The search starts
 * at 'start' (or at mmap_next_start if 'start' is 0).  Must be called
 * with mmap_lock() held.  Returns the guest address, or (abi_ulong)-1 on
 * error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve the needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM: the host address space has no memory. */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher aligned address.
                 */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead; round down.
                 */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /*
             * The result the kernel gave does not fit in the guest
             * address space; start again at low memory.  If we already
             * repeated, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap the temporary reservation and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping; instead indicate that
             * we'd truly like an allocation in low memory.
             */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                     ? TARGET_PAGE_ALIGN(mmap_min_addr)
                     : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
422
423
/*
 * Emulate mmap(2) for the guest.  'start', 'len', 'offset' are guest
 * values; 'target_prot' uses target PROT_* bits; the return value is the
 * guest address, or -1 with errno set on failure.
 * NOTE: the mmap flag constants used here are the HOST ones.
 */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    int page_flags, host_prot;

    mmap_lock();
    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        errno = EINVAL;
        goto fail;
    }

    /* Page-align len; a zero result here means the alignment overflowed. */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    /*
     * If we're mapping shared memory, ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the
     * level supported by the host -- anything that requires EXCP_ATOMIC
     * will not be atomic with respect to an external process.
     */
    if (flags & MAP_SHARED) {
        CPUState *cpu = thread_cpu;
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmaping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K.  But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page size than the host's, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */
    if ((qemu_real_host_page_size() < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat (fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at EOF aligned with the host's
             * real page size.  Additional anonymous maps will be created
             * beyond EOF.
             */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /*
         * Note: we prefer to control the mapping address, hence
         * MAP_FIXED onto the region mmap_find_vma just reserved.
         */
        p = mmap(g2h_untagged(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h_untagged(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if the requested memory area fits the target address
         * space.  This can fail with a 64-bit host and a 32-bit target;
         * on any other target/host combination the host mmap() reports
         * the error itself.
         */
        if (end < start || !guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /*
         * Worst case: we cannot map the file because the offset is not
         * aligned, so we read it instead.
         */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping.
             */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (host_prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h_untagged(start), len, offset) == -1)
                goto fail;
            if (!(host_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* Handle the start of the mapping. */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* One single host page. */
                ret = mmap_frag(real_start, start, end,
                                host_prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            host_prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* Handle the end of the mapping. */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            host_prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* Map the middle (easier: whole host pages). */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     host_prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    page_set_flags(start, start + len, page_flags);
 the_end:
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        FILE *f = qemu_log_trylock();
        if (f) {
            fprintf(f, "page layout changed following mmap\n");
            page_dump(f);
            qemu_log_unlock(f);
        }
    }
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
647
/*
 * With reserved_va, freed guest address space is not returned to the host;
 * instead the range is replaced by a PROT_NONE anonymous mapping so the
 * reservation stays intact.  Host pages only partially covered by the
 * range are kept if other guest pages in them are still in use.
 */
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* Handle the host page containing 'start'. */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            /* The whole range is within one host page. */
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        /* Handle the host page containing 'end'. */
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}
688
/*
 * Emulate munmap(2) for the guest range [start, start+len).  Host pages
 * only partially covered by the range remain mapped when other guest
 * pages still live in them; with reserved_va the freed range is replaced
 * by a PROT_NONE reservation instead of being unmapped.
 * Returns 0 on success or a negative target errno.
 */
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* Handle the host page containing 'start'. */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            /* The whole range is within one host page. */
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        /* Other guest pages still used: keep this host page mapped. */
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        /* Handle the host page containing 'end' the same way. */
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* Unmap what we can of the fully covered host pages. */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}
749
/*
 * Emulate mremap(2) for the guest.  Returns the new guest address or -1
 * with errno set.  With reserved_va, space vacated by a moved or shrunk
 * mapping is re-reserved via mmap_reserve().
 */
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    /* Validate all ranges that the requested operation will touch. */
    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* The mapping moved: re-reserve the old range. */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        /* Pick a destination ourselves so it stays in guest range. */
        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        /*
         * In-place resize.  NOTE(review): this 'prot' intentionally
         * shadows the function-scope 'prot' used after the if/else chain.
         */
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            /* Growing in place: the extension must currently be unused. */
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if the address fits the target address space. */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert the mremap() changes. */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    /* Shrunk in place: re-reserve the tail. */
                    mmap_reserve(old_addr + old_size, old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        /* Carry the old range's page flags over to the new range. */
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size,
                       prot | PAGE_VALID | PAGE_RESET);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}
838
839static bool can_passthrough_madv_dontneed(abi_ulong start, abi_ulong end)
840{
841 ulong addr;
842
843 if ((start | end) & ~qemu_host_page_mask) {
844 return false;
845 }
846
847 for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
848 if (!(page_get_flags(addr) & PAGE_ANON)) {
849 return false;
850 }
851 }
852
853 return true;
854}
855
856abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
857{
858 abi_ulong len, end;
859 int ret = 0;
860
861 if (start & ~TARGET_PAGE_MASK) {
862 return -TARGET_EINVAL;
863 }
864 len = TARGET_PAGE_ALIGN(len_in);
865
866 if (len_in && !len) {
867 return -TARGET_EINVAL;
868 }
869
870 end = start + len;
871 if (end < start) {
872 return -TARGET_EINVAL;
873 }
874
875 if (end == start) {
876 return 0;
877 }
878
879 if (!guest_range_valid_untagged(start, len)) {
880 return -TARGET_EINVAL;
881 }
882
883
884
885
886
887
888
889
890
891
892
893 mmap_lock();
894 if (advice == MADV_DONTNEED &&
895 can_passthrough_madv_dontneed(start, end)) {
896 ret = get_errno(madvise(g2h_untagged(start), len, MADV_DONTNEED));
897 if (ret == 0) {
898 page_reset_target_data(start, start + len);
899 }
900 }
901 mmap_unlock();
902
903 return ret;
904}
905