1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20#include "trace.h"
21#include "exec/log.h"
22#include "qemu.h"
23
/* Process-wide lock serializing changes to the guest address space. */
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Per-thread recursion depth for mmap_lock()/mmap_unlock(). */
static __thread int mmap_lock_count;
26
27void mmap_lock(void)
28{
29 if (mmap_lock_count++ == 0) {
30 pthread_mutex_lock(&mmap_mutex);
31 }
32}
33
34void mmap_unlock(void)
35{
36 if (--mmap_lock_count == 0) {
37 pthread_mutex_unlock(&mmap_mutex);
38 }
39}
40
41bool have_mmap_lock(void)
42{
43 return mmap_lock_count > 0 ? true : false;
44}
45
46
47void mmap_fork_start(void)
48{
49 if (mmap_lock_count)
50 abort();
51 pthread_mutex_lock(&mmap_mutex);
52}
53
54void mmap_fork_end(int child)
55{
56 if (child)
57 pthread_mutex_init(&mmap_mutex, NULL);
58 else
59 pthread_mutex_unlock(&mmap_mutex);
60}
61
62
63
64
65
66
67
/*
 * Validate target prot bits and derive the host protection.
 *
 * Returns the page flags to record for the range (PAGE_VALID plus the
 * RWX bits and any accepted target-specific bits), or 0 if @prot
 * contains bits outside the accepted set.  On success *host_prot is
 * set to the protection value to pass to the host mmap/mprotect.
 */
static int validate_prot_to_pageflags(int *host_prot, int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

    /*
     * Only read/write are forwarded to the host; PROT_EXEC is mapped
     * to PROT_READ, i.e. guest-executable pages need only be readable
     * by the host (guest code is executed via translation, not
     * directly on the host).
     */
    *host_prot = (prot & (PROT_READ | PROT_WRITE))
                 | (prot & PROT_EXEC ? PROT_READ : 0);

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /* TARGET_PROT_BTI is accepted only when the CPU has FEAT_BTI. */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }

        /* Likewise TARGET_PROT_MTE requires FEAT_MTE. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#endif

    /* Any bit outside the accepted set makes the whole prot invalid. */
    return prot & ~valid ? 0 : page_flags;
}
110
111
/*
 * Emulate mprotect() for the guest.  Because a host page may contain
 * several target pages, partially-covered host pages at either end of
 * the range must be given the union of the requested protection and
 * the flags of the neighbouring target pages that share the host page.
 * Returns 0 or a negative target errno.
 */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret, page_flags, host_prot;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* Handle the host page containing the start of the range. */
        prot1 = host_prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            /* Preserve the flags of target pages before the range. */
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            /* One single host page: also merge the pages after the range. */
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        /* Handle the host page containing the end of the range. */
        prot1 = host_prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* Handle the pages in the middle, fully covered by the range. */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start),
                       host_end - host_start, host_prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len, page_flags);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
185
186
/*
 * Map the guest sub-range [start, end) of the single host page that
 * begins at @real_start, without disturbing the other target pages
 * sharing that host page.  @prot/@flags/@fd/@offset describe the
 * requested mapping.  Returns 0 on success, -1 on failure.
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* Get the protection of the target pages outside the mapping. */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* No page was there yet, so allocate the whole host page. */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /*
         * A writable shared file mapping cannot be emulated by copying
         * file data into the page: stores would never reach the file.
         */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* Temporarily make the host page writable so we can fill it. */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* Read in the corresponding file data. */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1)
            return -1;

        /* Restore the final combined protection. */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            /* Fresh anonymous memory must read back as zero. */
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}
244
/*
 * Default lowest guest address at which mmap_find_vma() places
 * mappings when the caller supplies no hint (see its use below).
 */
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
#ifdef TARGET_AARCH64
# define TASK_UNMAPPED_BASE 0x5500000000
#else
# define TASK_UNMAPPED_BASE (1ul << 38)
#endif
#else
# define TASK_UNMAPPED_BASE 0x40000000
#endif
/* Next guest address at which mmap_find_vma() starts searching. */
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

/* NOTE(review): not referenced in this file — presumably maintained by
   the brk() emulation elsewhere; confirm before relying on it here. */
unsigned long last_brk;
257
258
259
/*
 * Find a free region of @size bytes inside the pre-reserved guest
 * address space (reserved_va mode), searching downward from @start
 * using the page_get_flags() bookkeeping rather than host mmap probes.
 * Returns the aligned start address, or (abi_ulong)-1 on failure.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Clamp the candidate window so [addr, end_addr) stays inside
       the reserved area; if the hint is too high, wrap immediately. */
    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space. */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from end_addr for a run of free pages. */
    addr = end_addr;
    while (1) {
        addr -= incr;
        /* addr > end_addr detects unsigned wraparound past zero. */
        if (addr > end_addr) {
            if (looped) {
                /* The whole address space has been searched. */
                return (abi_ulong)-1;
            }
            /* Wrap: restart at the top of the address space. */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use: restart the window just below it. */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Found a whole free window of @size bytes. */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}
307
308
309
310
311
312
313
/*
 * Find and reserve a free region of @size bytes with the given
 * alignment, starting the search at @start (or at mmap_next_start if
 * @start is 0).  Returns the guest start address, or (abi_ulong)-1 on
 * failure.  Caller must hold the mmap lock.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, use the default search start address. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Probe: ask the host kernel for a free region at (or near)
         * addr.  PROT_NONE + MAP_NORESERVE keeps the probe cheap; the
         * mapping is unmapped again below after inspection.
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM: the host address space itself is exhausted. */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count consecutive identical results; used to steer the
           retry strategy in the switch below. */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success: suitably aligned and within guest range. */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the kernel returned the first region with
                 * enough free space; retry at the next aligned
                 * address above it.
                 */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Same result twice: try aligning downward instead
                   (the kernel may be allocating top-down). */
                addr &= -align;
                break;
            case 2:
                /* Still stuck: start over at low memory. */
                addr = 0;
                break;
            default:
                /* Give up. */
                addr = -1;
                break;
            }
        } else {
            /* The kernel's suggestion lies outside the guest address
               space; restart at low memory, or fail if repeating. */
            addr = (repeat ? -1 : 0);
        }

        /* Release the probe mapping before retrying. */
        munmap(ptr, size);

        /* ENOMEM if we have checked the whole guest address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use address 0 when wrapping; request the
               lowest usable address instead. */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            /* Wrapped around and reached the original start: full. */
            return (abi_ulong)-1;
        }
    }
}
420
421
/*
 * Emulate mmap() for the guest.  Handles the mismatch between host
 * and target page sizes: fully-covered host pages are mapped
 * directly, while partially-covered host pages at the edges go
 * through mmap_frag().  Returns the guest start address, or -1 with
 * errno set on failure.
 */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    int page_flags, host_prot;

    mmap_lock();
    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        errno = EINVAL;
        goto fail;
    }

    /* Page-align len; a zero result here means the length overflowed. */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    /*
     * Shared mappings may be visible to other processes, so switch
     * this vCPU to parallel code generation (CF_PARALLEL) and flush
     * previously generated translations before proceeding.
     */
    if (flags & MAP_SHARED) {
        CPUState *cpu = thread_cpu;
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* Without MAP_FIXED, pick a free region ourselves so the result
       is valid in the guest address space. */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /*
     * When host pages are larger than target pages, a file-backed
     * mapping extending past EOF must be clamped so we do not touch
     * host pages entirely beyond the end of the file.
     * NOTE(review): assumes sb.st_size >= offset here — confirm the
     * intended behavior for offsets past EOF.
     */
    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat (fd, &sb) == -1)
            goto fail;

        if (offset + len > sb.st_size) {
            /* Clamp len to the remaining file data. */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /*
         * First reserve the whole host-page-aligned region with an
         * anonymous mapping at the address found above (MAP_FIXED is
         * safe here because mmap_find_vma chose a free range).
         */
        p = mmap(g2h_untagged(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }

        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            /* Overlay the file mapping on top of the reservation. */
            p = mmap(g2h_untagged(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /* Reject ranges that wrap or fall outside the guest space. */
        if (end < start || !guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /*
         * If the file offset and the target address disagree modulo
         * the host page size, the host cannot create the mapping
         * directly: fall back to an anonymous mapping filled by
         * pread().  This cannot work for writable shared mappings.
         */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* Writable shared mapping: stores could never reach the
               file, so refuse. */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (host_prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h_untagged(start), len, offset) == -1)
                goto fail;
            if (!(host_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* Handle the partially-covered host page at the start. */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* One single host page covers the whole range. */
                ret = mmap_frag(real_start, start, end,
                                host_prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            host_prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* Handle the partially-covered host page at the end. */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            host_prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* Map the middle, fully host-page-aligned, part directly. */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     host_prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    page_set_flags(start, start + len, page_flags);
 the_end:
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        log_page_dump(__func__);
    }
    /* Discard any translations overlapping the new mapping. */
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
640
/*
 * In reserved_va mode, "unmapping" means re-covering the range with a
 * PROT_NONE reservation instead of releasing it to the host.  Host
 * pages still partially occupied by live target pages are skipped.
 */
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* Check the host page containing the start of the range. */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            /* Single host page: also check pages after the range. */
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            /* Other target pages still live here; leave it mapped. */
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        /* Likewise for the host page containing the end of the range. */
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        /* Re-establish the inaccessible reservation over the range. */
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}
681
/*
 * Emulate munmap() for the guest.  Host pages shared with target
 * pages outside the range are kept mapped; only fully-freed host
 * pages are unmapped (or re-reserved in reserved_va mode).
 * Returns 0 or a negative target errno.
 */
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* Check the host page containing the start of the range. */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            /* Single host page: also check pages after the range. */
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            /* Still occupied by other target pages; keep it. */
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        /* Likewise for the host page containing the end of the range. */
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* Unmap whatever host pages are now fully free. */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        /* Discard translations for the now-unmapped range. */
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}
742
/*
 * Emulate mremap() for the guest.  Returns the new guest address, or
 * -1 with errno set on failure.
 */
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    /* NOTE(review): this outer 'prot' is shadowed by the inner
       declaration in the no-flags branch below; it is only assigned
       and read in the page-flag update at the end. */
    int prot;
    void *host_addr;

    /* Validate old and (where applicable) new guest ranges up front. */
    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* In reserved_va mode, re-reserve the vacated old range.
               (Overlapping old/new would already have failed above.) */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        /* Pick a destination inside the guest address space ourselves
           so the moved mapping stays guest-valid. */
        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        /* In-place resize.  In reserved_va mode, growing is only
           allowed if the pages being grown into are unused. */
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if the result fits the guest address space. */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert the resize and fail. */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    /* Shrunk: re-reserve the tail that was released. */
                    mmap_reserve(old_addr + old_size, old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        /* Carry the old range's page flags over to the new range. */
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size,
                       prot | PAGE_VALID | PAGE_RESET);
    }
    /* NOTE(review): on failure new_addr is -1 here, so this invalidate
       range looks suspect — confirm intended behavior. */
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}
831