qemu/linux-user/mmap.c
/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu.h"
#include "qemu-common.h"
#include "translate-all.h"

//#define DEBUG_MMAP

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

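/*
 * The mmap lock is a single process-wide mutex made recursive per thread
 * via mmap_lock_count: only the outermost mmap_lock()/mmap_unlock() pair
 * in a thread touches mmap_mutex, so code that already holds the lock may
 * re-enter it freely.  A nesting sketch:
 *
 *     mmap_lock();      // count 0 -> 1, takes mmap_mutex
 *     mmap_lock();      // count 1 -> 2, mutex untouched
 *     mmap_unlock();    // count 2 -> 1, mutex still held
 *     mmap_unlock();    // count 1 -> 0, releases mmap_mutex
 *
 * target_mmap() below relies on this when it re-enters target_mmap() and
 * target_mprotect() while already holding the lock.
 */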
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
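
/*
 * A sketch of the intended call sequence around fork() (the actual calls
 * are made from the fork handling in the syscall layer, not from here):
 *
 *     mmap_fork_start();         // forking thread: take mmap_mutex
 *     pid = fork();
 *     mmap_fork_end(pid == 0);   // child re-inits the mutex, parent unlocks
 *
 * The child must re-initialise rather than unlock: only the forking thread
 * survives fork(), so a mutex held by any other thread would otherwise stay
 * locked forever in the child.
 */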

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid(start, len)) {
        return -TARGET_ENOMEM;
    }
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
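    /*
     * If the target page size is smaller than the host's, the range may
     * start or end in the middle of a host page.  The two partial host
     * pages are handled first, OR-ing in the flags of neighbouring target
     * pages that share them (e.g. with 4K target pages on a 64K-page host,
     * mprotect()ing one target page must not strip access from the other
     * fifteen sharing that host page); the host-aligned middle is then
     * changed with a single mprotect().
     */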
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

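/*
 * A note on the arguments, inferred from the callers in target_mmap():
 * real_start is host-page aligned and names the single host page touched;
 * [start, end) is the target sub-range of that page being mapped.  As a
 * hypothetical example with 4K host pages: real_start=0x1000, start=0x1800,
 * end=0x2000 maps only the upper half of the host page, so the protections
 * already covering the lower half must be preserved.
 */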
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if the mapping
           is both shared and writable */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to write */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1)
            return -1;

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h(start), 0, end - start);
        }
    }
    return 0;
}

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
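/* Start of the search for a free guest VA range when the guest passes
   addr == 0.  Advanced after each successful default placement so later
   searches do not rescan space that is already in use. */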
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size);
    end_addr = start + size;
    if (end_addr > reserved_va) {
        end_addr = reserved_va;
    }
    addr = end_addr - qemu_host_page_size;

    while (1) {
        if (addr > end_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = reserved_va;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            end_addr = addr;
        }
        if (addr && addr + size == end_addr) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }

    return addr;
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Returns (abi_ulong)-1 on error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                     ? TARGET_PAGE_ALIGN(mmap_min_addr)
                     : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch(flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary.  */

    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat (fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF, aligned to the host's
               real page size. Additional anonymous maps will be created
               beyond EOF.  */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }
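
    /* (The "additional anonymous maps" above come for free in the
       !MAP_FIXED path below: it first maps the whole host_len range
       anonymously, then maps the file over the front of it, so the tail
       past the truncated file map remains zeroed anonymous memory up to
       the target page boundary.) */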

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address, which matters
           especially when qemu_host_page_size > qemu_real_host_page_size.
           Mapping the whole host_len anonymously first guarantees the
           subsequent file map lands inside an area we own. */
        p = mmap(g2h(start), host_len, prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test whether the requested memory area fits the target address
         * space. It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination, the host mmap() handles
         * this error correctly.
         */
        if (!guest_range_valid(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           host-page aligned relative to start, so we read it instead */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if the
               mapping is both shared and writable */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

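/* Replace [start, start + size) with a PROT_NONE, MAP_NORESERVE mapping
   instead of unmapping it, so the pre-reserved guest address space
   (reserved_va) stays claimed by QEMU.  Host pages at either edge that
   are still shared with live target pages are left untouched. */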
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
                 MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
                 -1, 0);
    }
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

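/* Guest mremap().  When reserved_va is in effect, any guest range the
   host mremap() vacates must be handed back to mmap_reserve() rather
   than left unmapped, so the reservation stays intact; each of the three
   branches below (MREMAP_FIXED, MREMAP_MAYMOVE, in-place resize) does
   this for the range it gives up. */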
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid(new_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h(old_addr), old_size, new_size,
                           flags, g2h(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED, g2h(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
            if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
                /* re-reserve the tail vacated by shrinking the mapping */
                mmap_reserve(old_addr + new_size, old_size - new_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}
