qemu/bsd-user/mmap.c
/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu.h"
#include "qemu-common.h"
#include "bsd-mman.h"
#include "exec/exec-all.h"

//#define DEBUG_MMAP

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

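/*
 * mmap_lock()/mmap_unlock() serialize all changes to the guest page mappings.
 * The per-thread mmap_lock_count makes the lock recursive: only the outermost
 * lock/unlock pair in a given thread touches the underlying mutex.
 */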
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0 ? true : false;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}

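/*
 * target_mprotect() changes protections on a range of guest pages.  When the
 * host page size is larger than the target page size, host pages that are
 * only partially covered by the range must keep the union of the protections
 * of all the target pages they contain; only the fully covered middle part
 * gets exactly 'prot'.
 */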
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_FMT_lx
           " len=0x" TARGET_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

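/*
 * mmap_frag() handles the part of a guest mapping that only partially covers
 * a single host page.  If no other guest page in that host page is live, a
 * fresh anonymous host page is mapped; otherwise the existing page is reused
 * and, for file-backed mappings, the file contents are copied in with pread()
 * before the final protection is applied.
 */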
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANON, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANON)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, g2h_untagged(start), end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}

static abi_ulong mmap_next_start = 0x40000000;

unsigned long last_brk;

/* find a free memory area of size 'size'. The search starts at
   'start'. If 'start' == 0, then a default start address is used.
   Return -1 if error.
*/
/* page_init() marks pages used by the host as reserved to be sure not
   to use them. */
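/*
 * The search is a simple linear scan in steps of the host page size, starting
 * at 'start' (or at mmap_next_start when 'start' is 0), looking for a run of
 * target pages with no flags set.  If the address wraps all the way around
 * back to its starting point, no free area of the requested size exists.
 */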
static abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    abi_ulong addr, addr1, addr_start;
    int prot;
    unsigned long new_brk;

    new_brk = (unsigned long)sbrk(0);
    if (last_brk && last_brk < new_brk && last_brk == (target_ulong)last_brk) {
        /* This is a hack to catch the host allocating memory with brk().
           If it uses mmap then we lose.
           FIXME: We really want to avoid the host allocating memory in
           the first place, and maybe leave some slack to avoid switching
           to mmap.  */
        page_set_flags(last_brk & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(new_brk),
                       PAGE_RESERVED);
    }
    last_brk = new_brk;

    size = HOST_PAGE_ALIGN(size);
    start = start & qemu_host_page_mask;
    addr = start;
    if (addr == 0)
        addr = mmap_next_start;
    addr_start = addr;
    for(;;) {
        prot = 0;
        for(addr1 = addr; addr1 < (addr + size); addr1 += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr1);
        }
        if (prot == 0)
            break;
        addr += qemu_host_page_size;
        /* we found nothing */
        if (addr == addr_start)
            return (abi_ulong)-1;
    }
    if (start == 0)
        mmap_next_start = addr + size;
    return addr;
}

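/*
 * target_mmap() implements the guest mmap().  Without MAP_FIXED, a free area
 * is found with mmap_find_vma() and the whole host-page-aligned range is
 * mapped there in one host mmap() call.  With MAP_FIXED, the range is split
 * into an unaligned head fragment, an unaligned tail fragment (both handled
 * by mmap_frag()) and an aligned middle part mapped directly.  If the file
 * offset and the start address disagree modulo the host page size, the file
 * cannot be mapped at all and is instead read into an anonymous mapping.
 */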
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    unsigned long host_start;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_FMT_lx
               " len=0x" TARGET_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANON)
            printf("MAP_ANON ");
        switch(flags & TARGET_BSD_MAP_FLAGMASK) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_FLAGMASK=0x%x] ", flags & TARGET_BSD_MAP_FLAGMASK);
            break;
        }
        printf("fd=%d offset=" TARGET_FMT_lx "\n", fd, offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;

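    /*
     * When the guest did not ask for a fixed address, let mmap_find_vma()
     * choose one; the file offset and length are aligned down/up to host
     * pages so a single host mmap() covers the whole request.
     */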
    if (!(flags & MAP_FIXED)) {
        abi_ulong mmap_start;
        void *p;
        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        mmap_start = mmap_find_vma(real_start, host_len);
        if (mmap_start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h_untagged(mmap_start),
                 host_len, prot, flags | MAP_FIXED, fd, host_offset);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANON))
            host_start += offset - host_offset;
        start = h2g(host_start);
    } else {
        int flg;
        target_ulong addr;

        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
            flg = page_get_flags(addr);
            if (flg & PAGE_RESERVED) {
                errno = ENXIO;
                goto fail;
            }
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANON) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            pread(fd, g2h_untagged(start), len, offset);
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANON)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

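/*
 * target_munmap() can only unmap whole host pages.  A partially covered host
 * page at either end is kept whenever any other guest page inside it is still
 * mapped; the guest page flags for the requested range are cleared regardless
 * once the host munmap() succeeds.
 */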
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x%lx len=0x%lx\n", start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        ret = munmap(g2h_untagged(real_start), real_end - real_start);
    }

    if (ret == 0)
        page_set_flags(start, start + len, 0);
    mmap_unlock();
    return ret;
}

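/*
 * target_msync() forwards to the host msync() after aligning the start of the
 * range down to a host page boundary.
 */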
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h_untagged(start), end - start, flags);
}