/*
 * mmap support for qemu
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <linux/unistd.h>

#include "qemu.h"
#include "qemu-common.h"

#if defined(CONFIG_USE_NPTL)
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

/* The mmap lock is recursive per thread: only the outermost
   mmap_lock()/mmap_unlock() pair touches the mutex.  */
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Take the lock across fork() so the mmap state cannot be cloned while
   another thread is modifying it; the child reinitialises the mutex,
   the parent simply releases it.  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
#else
/* Without NPTL the emulator is single threaded, so no locking is needed.  */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
#endif

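/* Emulate mprotect() for the target: 'start' and 'len' are target
   addresses.  Host pages that are only partially covered by the request
   keep the union of the protections of the target pages they contain.  */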
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle the host page containing the start of the area */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        /* handle the host page containing the end of the area */
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

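/* Map the target range [start, end), which lies entirely inside the single
   host page starting at real_start.  Protections of target pages already
   present on that host page are preserved, and file-backed data is copied
   in with pread() rather than mapped directly.  Returns 0 on success,
   -1 on failure.  */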
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection so that the page can be written */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1)
            return -1;

        /* put the final protection in place */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#elif defined(__CYGWIN__)
/* Cygwin has a comparatively small address space.  */
# define TASK_UNMAPPED_BASE  0x18000000
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
static abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

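/* Subroutine of mmap_find_vma: scan the page flags of the fixed reserved
   guest address space (RESERVED_VA) for a gap of 'size' bytes, starting at
   'start' and wrapping around at most once.  Returns the address found,
   or (abi_ulong)-1 if nothing fits.  */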
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
{
    abi_ulong addr;
    abi_ulong last_addr;
    int prot;
    int looped = 0;

    if (size > RESERVED_VA) {
        return (abi_ulong)-1;
    }

    last_addr = start;
    for (addr = start; last_addr + size != addr; addr += qemu_host_page_size) {
        if (last_addr + size >= RESERVED_VA
            || (abi_ulong)(last_addr + size) < last_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            last_addr = qemu_host_page_size;
            addr = 0;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            last_addr = addr + qemu_host_page_size;
        }
    }
    mmap_next_start = addr;
    return last_addr;
}

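/*
 * Find and reserve a free memory area of size 'size'.  The search
 * starts at 'start'.  It must be called with mmap_lock() held.
 * Returns (abi_ulong)-1 on failure.
 */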
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    /* If 'start' == 0, a default start address is used.  */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (RESERVED_VA) {
        return mmap_find_vma_reserved(start, size);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /* Reserve the candidate area with a PROT_NONE mapping so that no
           other thread can grab it before the caller replaces it (with
           MAP_FIXED mmap, MREMAP_FIXED mremap or SHM_REMAP shmat).  */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM: the host address space has no memory left */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result the kernel gave us is the first with
                   enough free space, so start again at the next higher
                   target page.  */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave us didn't fit the target
               address space, start over at low memory.  If that has
               already been tried, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap the reservation and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping; instead indicate that
               we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

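/* Emulate mmap() for the target.  NOTE: the prot and flags constants are
   the HOST ones, but all addresses are target addresses.  Returns the
   target start address of the new mapping, or -1 with errno set on
   failure.  */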
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    unsigned long host_start;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch (flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;

    /* If the real host page size is smaller than the target page size, a
       file mapping can cover host pages that lie beyond the end of the
       file; any target access to that tail would then fault with SIGBUS
       on the host.  Truncate such file mappings at EOF, rounded up to the
       real host page size; the rest of the target page stays backed by
       anonymous memory.  */
    if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
        && !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a mapping beyond EOF?  */
        if (offset + len > sb.st_size) {
            len = (sb.st_size - offset);
            len += qemu_real_host_page_size - 1;
            len &= ~(qemu_real_host_page_size - 1);
        }
    }

    if (!(flags & MAP_FIXED)) {
        abi_ulong mmap_start;
        void *p;
        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        mmap_start = mmap_find_vma(real_start, host_len);
        if (mmap_start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
        /* Reserve the whole host range with an anonymous mapping first so
           that we fully control the address; this matters when
           qemu_host_page_size > qemu_real_host_page_size.  */
        p = mmap(g2h(mmap_start),
                 host_len, prot, flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update host_start so that it points to the file data at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(mmap_start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /* Test whether the requested memory area fits in the target
           address space.  This can only fail on a 64-bit host with a
           32-bit target; in any other case the host mmap() reports the
           error itself.  */
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           host-page aligned, so we read it instead */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

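/* With a fixed RESERVED_VA guest address space, released guest pages are
   not returned to the host but remapped PROT_NONE so the region stays
   reserved.  Host pages still partially in use by the target are left
   untouched.  */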
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle the host page containing the start of the range */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}

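/* Emulate munmap() for the target.  Host pages shared with target mappings
   outside the unmapped range are kept; with RESERVED_VA the freed range is
   re-reserved instead of being unmapped on the host.  */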
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle the host page containing the start of the range */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (RESERVED_VA) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0)
        page_set_flags(start, start + len, 0);
    mmap_unlock();
    return ret;
}

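/* Emulate mremap() for the target, keeping the page flags and the reserved
   guest address space (RESERVED_VA) consistent with the host mapping that
   was actually moved or resized.  */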
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                     old_size, new_size,
                                     flags,
                                     g2h(new_addr));

        if (RESERVED_VA && host_addr != MAP_FAILED) {
            /* If the new and old ranges overlap, the mremap above has
               already failed with EINVAL, so it is safe to re-reserve the
               old range here.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                         old_size, new_size,
                                         flags | MREMAP_FIXED,
                                         g2h(mmap_start));
            /* only release the old range if the move actually succeeded */
            if (RESERVED_VA && host_addr != MAP_FAILED) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (RESERVED_VA && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
            if (host_addr != MAP_FAILED && RESERVED_VA && old_size > new_size) {
                /* re-reserve the tail of the range freed by shrinking */
                mmap_reserve(old_addr + new_size, old_size - new_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }

        /* Check that the result fits in the target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert the mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    mmap_unlock();
    return new_addr;
}

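/* Emulate msync() for the target: validate and page-align the target range,
   then forward the host-page-aligned range to the host msync().  */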
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}