1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20
21#include "qemu.h"
22#include "qemu-common.h"
23#include "translate-all.h"
24
25
26
/* Serializes changes to the guest memory map.  Recursive per thread:
   the mutex is actually taken/released only at nesting depth zero,
   tracked by mmap_lock_count below. */
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Per-thread lock nesting depth for mmap_mutex. */
static __thread int mmap_lock_count;
29
30void mmap_lock(void)
31{
32 if (mmap_lock_count++ == 0) {
33 pthread_mutex_lock(&mmap_mutex);
34 }
35}
36
37void mmap_unlock(void)
38{
39 if (--mmap_lock_count == 0) {
40 pthread_mutex_unlock(&mmap_mutex);
41 }
42}
43
44bool have_mmap_lock(void)
45{
46 return mmap_lock_count > 0 ? true : false;
47}
48
49
50void mmap_fork_start(void)
51{
52 if (mmap_lock_count)
53 abort();
54 pthread_mutex_lock(&mmap_mutex);
55}
56
57void mmap_fork_end(int child)
58{
59 if (child)
60 pthread_mutex_init(&mmap_mutex, NULL);
61 else
62 pthread_mutex_unlock(&mmap_mutex);
63}
64
65
/*
 * Emulate mprotect(2) for the guest range [start, start + len).
 *
 * Guest pages may be smaller than host pages, so a host page that is
 * only partially covered by the request must receive the union of the
 * protections of every guest page it contains; fully covered host
 * pages get 'prot' directly.  The guest-visible page flags are updated
 * with page_set_flags() on success.
 *
 * Returns 0 on success, -EINVAL for misaligned/overflowing arguments,
 * or the raw host mprotect() result (-1) if a host call fails.
 */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           "len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    /* start must be aligned to a guest page. */
    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    /* Reject a range that wraps the guest address space. */
    if (end < start)
        return -EINVAL;
    /* Silently drop any flag outside R/W/X. */
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* The first host page is partially covered: merge in the flags
           of the guest pages that precede 'start' inside it. */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            /* The whole range fits in this single host page: also
               merge the guest pages after 'end' and finish here. */
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        /* The last host page is partially covered: same merging for
           the guest pages that follow 'end'. */
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* Handle the pages in the middle: fully covered, so they take the
       requested protection unchanged. */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
134
135
/*
 * Map the guest fragment [start, end) that lies entirely inside the
 * single host page beginning at real_start.
 *
 * Because multiple guest pages can share one host page, part of this
 * host page may already belong to other guest mappings whose
 * protections must be preserved.  File-backed fragments are emulated
 * by pread()ing the data into place rather than mapping the file.
 *
 * Returns 0 on success, -1 on failure.
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* Collect the flags of everything already mapped in this host page
       outside [start, end).
       NOTE(review): this walks byte-by-byte; guest-page strides would
       suffice since flags are per page. */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* Nothing else lives in this host page: create it as anonymous
           memory with the requested protection. */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* A writable shared file fragment cannot be emulated with a
           private copy: guest stores would never reach the file. */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* Temporarily grant write access so the data can be read in. */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* Emulate the file mapping by copying the file contents. */
        if (pread(fd, g2h(start), end - start, offset) == -1)
            return -1;

        /* Restore the intended protection if it differs. */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            /* Fresh anonymous guest memory must read back as zeros. */
            memset(g2h(start), 0, end - start);
        }
    }
    return 0;
}
193
/* Base address used when the guest does not supply a mapping hint. */
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE (1ul << 38)
#elif defined(__CYGWIN__)
/* Cygwin has comparatively little usable address space. */
# define TASK_UNMAPPED_BASE 0x18000000
#else
# define TASK_UNMAPPED_BASE 0x40000000
#endif
/* Next address mmap_find_vma() will try for hint-less allocations. */
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

/* presumably tracks the last brk() boundary — maintained outside this
   chunk; TODO confirm against the brk emulation. */
unsigned long last_brk;
205
206
207
/*
 * Find a free run of 'size' bytes of guest address space inside the
 * pre-reserved region [0, reserved_va), scanning downwards from
 * 'start'.  Free vs. used is decided with page_get_flags(), since the
 * whole region is already mmap'd on the host side.
 *
 * Returns the address found, or (abi_ulong)-1 if nothing fits.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size);
    end_addr = start + size;
    if (end_addr > reserved_va) {
        end_addr = reserved_va;
    }
    addr = end_addr - qemu_host_page_size;

    while (1) {
        if (addr > end_addr) {
            /* addr wrapped below zero (unsigned underflow while
               scanning down).  Restart once from the very top of the
               reserved region, then give up. */
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = reserved_va;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            /* Page in use: any free run must end below it. */
            end_addr = addr;
        }
        if (addr + size == end_addr) {
            /* Found 'size' contiguous free bytes ending at end_addr. */
            break;
        }
        addr -= qemu_host_page_size;
    }

    /* Remember where to start the next hint-less search. */
    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }

    return addr;
}
252
253
254
255
256
257
258
/*
 * Find and temporarily probe a region of host address space that
 * corresponds to a valid, guest-page-aligned chunk of guest address
 * space of at least 'size' bytes, starting the search at 'start'
 * (or at mmap_next_start when start == 0).
 *
 * Returns the guest address of the region, or (abi_ulong)-1 on
 * failure.  The probe mapping is unmapped before retrying, so on
 * success the returned range is merely "known free", not held.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    /* No hint: continue from where the previous search left off. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /* Ask the host kernel for a block near g2h(addr).  Without
           MAP_FIXED the kernel may place it anywhere, so the result
           has to be validated against the guest address space. */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM: the host itself is out of address space. */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count consecutive identical results so the fallback strategy
           below can escalate. */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success: inside guest space and guest-page aligned. */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* In guest space but misaligned: pick the next candidate
               based on how often we have been stuck here. */
            switch (repeat) {
            case 0:
                /* First failure: round up to the next guest page. */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /* Second failure: try rounding down instead. */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Third failure: let the kernel choose freely. */
                addr = 0;
                break;
            default:
                /* Still stuck: give up. */
                addr = -1;
                break;
            }
        } else {
            /* Result lies outside guest space: retry with a
               kernel-chosen address once, then fail. */
            addr = (repeat ? -1 : 0);
        }

        /* Drop the probe mapping before the next attempt. */
        munmap(ptr, size);

        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            /* Wrap the search to the bottom of usable guest space,
               but only once. */
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Never hand out address 0; respect mmap_min_addr. */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            /* Wrapped around and came back to the starting point:
               the whole space has been scanned. */
            return (abi_ulong)-1;
        }
    }
}
362
363
/*
 * Emulate mmap(2) for the guest.  'start', 'len' and 'offset' are
 * guest values; host placement goes through g2h()/h2g().  Host pages
 * only partially covered by the guest range are handled by
 * mmap_frag(); misaligned file offsets fall back to pread() into an
 * anonymous mapping.
 *
 * Returns the guest address of the mapping, or -1 with errno set.
 */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch(flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
    }
#endif

    /* The file offset must be aligned to a guest page. */
    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the caller lets us pick the address, do it now — before the
       length may be truncated by the EOF handling below. */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When the guest page size exceeds the host's, a file mapping that
       extends past EOF would SIGBUS on host-page granularity even
       though the guest expects access up to its own page boundary.
       Truncate the file-backed part at EOF (host-page aligned); the
       remainder is covered by anonymous pages. */
    if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
        && !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat (fd, &sb) == -1)
            goto fail;

        /* Mapping beyond the end of the file? */
        if (offset + len > sb.st_size) {
            /* Truncate the file map at EOF, aligned to the host's
               real page size. */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* First reserve the full host range anonymously at the address
           mmap_find_vma() chose, then overlay the file mapping on top
           so the placement cannot move. */
        p = mmap(g2h(start), host_len, prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;

        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h(start), host_len);
                goto fail;
            }
            /* Point at the data for the guest-requested 'offset'. */
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /* Reject a range that exceeds the guest address space.  This
           can only happen on a 64-bit host with a 32-bit guest; in all
           other combinations host mmap() reports the error itself. */
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }

        /* Worst case: the file offset and the start address are not
           congruent modulo the host page size, so the file cannot be
           mapped directly — read its contents instead. */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* A writable shared mapping cannot be emulated by copying:
               stores would never reach the file. */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* Handle the start of the mapping (partial first host page). */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* The whole range fits in one host page. */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* Handle the end of the mapping (partial last host page). */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* Map the middle: whole host pages, mapped directly. */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
573
/*
 * Re-reserve [start, start + size) with PROT_NONE instead of actually
 * unmapping it, so the pre-reserved guest address space (reserved_va)
 * stays claimed on the host.  Host pages at either end that still
 * contain live guest pages are excluded from the reservation.
 */
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* First host page partially covered: check whether any guest
           page before 'start' in it is still mapped. */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            /* Whole range inside one host page: also check the guest
               pages after 'end'. */
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        /* Something else lives here: skip this host page. */
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        /* Last host page partially covered: same check after 'end'. */
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}
614
/*
 * Emulate munmap(2) for the guest.  Host pages that also contain live
 * guest pages outside [start, start + len) are kept mapped; with
 * reserved_va the freed range is re-reserved via mmap_reserve()
 * instead of being returned to the host.
 *
 * Returns 0 on success, -EINVAL for bad arguments, or the raw host
 * munmap() result on host failure.
 */
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* First host page partially covered: see whether guest pages
           before 'start' in it are still mapped. */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            /* Whole range inside one host page: also check the guest
               pages after 'end'. */
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        /* Keep this host page if anything else still lives in it. */
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        /* Last host page partially covered: same check after 'end'. */
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* Unmap (or re-reserve) whatever whole host pages remain. */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}
676
677abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
678 abi_ulong new_size, unsigned long flags,
679 abi_ulong new_addr)
680{
681 int prot;
682 void *host_addr;
683
684 mmap_lock();
685
686 if (flags & MREMAP_FIXED) {
687 host_addr = mremap(g2h(old_addr), old_size, new_size,
688 flags, g2h(new_addr));
689
690 if (reserved_va && host_addr != MAP_FAILED) {
691
692
693 mmap_reserve(old_addr, old_size);
694 }
695 } else if (flags & MREMAP_MAYMOVE) {
696 abi_ulong mmap_start;
697
698 mmap_start = mmap_find_vma(0, new_size);
699
700 if (mmap_start == -1) {
701 errno = ENOMEM;
702 host_addr = MAP_FAILED;
703 } else {
704 host_addr = mremap(g2h(old_addr), old_size, new_size,
705 flags | MREMAP_FIXED, g2h(mmap_start));
706 if (reserved_va) {
707 mmap_reserve(old_addr, old_size);
708 }
709 }
710 } else {
711 int prot = 0;
712 if (reserved_va && old_size < new_size) {
713 abi_ulong addr;
714 for (addr = old_addr + old_size;
715 addr < old_addr + new_size;
716 addr++) {
717 prot |= page_get_flags(addr);
718 }
719 }
720 if (prot == 0) {
721 host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
722 if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
723 mmap_reserve(old_addr + old_size, new_size - old_size);
724 }
725 } else {
726 errno = ENOMEM;
727 host_addr = MAP_FAILED;
728 }
729
730 if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
731
732 host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
733 errno = ENOMEM;
734 host_addr = MAP_FAILED;
735 }
736 }
737
738 if (host_addr == MAP_FAILED) {
739 new_addr = -1;
740 } else {
741 new_addr = h2g(host_addr);
742 prot = page_get_flags(old_addr);
743 page_set_flags(old_addr, old_addr + old_size, 0);
744 page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
745 }
746 tb_invalidate_phys_range(new_addr, new_addr + new_size);
747 mmap_unlock();
748 return new_addr;
749}
750
751int target_msync(abi_ulong start, abi_ulong len, int flags)
752{
753 abi_ulong end;
754
755 if (start & ~TARGET_PAGE_MASK)
756 return -EINVAL;
757 len = TARGET_PAGE_ALIGN(len);
758 end = start + len;
759 if (end < start)
760 return -EINVAL;
761 if (end == start)
762 return 0;
763
764 start &= qemu_host_page_mask;
765 return msync(g2h(start), end - start, flags);
766}
767