qemu/util/oslib-posix.c
/*
 * oslib-posix.c
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2010 Red Hat, Inc.
 *
 * QEMU library functions on POSIX which are shared between QEMU and
 * the QEMU tools.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <termios.h>

#include <glib/gprintf.h>

#include "sysemu/sysemu.h"
#include "trace.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/madvise.h"
#include "qemu/sockets.h"
#include "qemu/thread.h"
#include <libgen.h>
#include "qemu/cutils.h"
#include "qemu/units.h"
#include "qemu/thread-context.h"

#ifdef CONFIG_LINUX
#include <sys/syscall.h>
#endif

#ifdef __FreeBSD__
#include <sys/thr.h>
#include <sys/user.h>
#include <libutil.h>
#endif

#ifdef __NetBSD__
#include <lwp.h>
#endif

#include "qemu/mmap-alloc.h"

#define MAX_MEM_PREALLOC_THREAD_COUNT 16

struct MemsetThread;

typedef struct MemsetContext {
    bool all_threads_created;
    bool any_thread_failed;
    struct MemsetThread *threads;
    int num_threads;
} MemsetContext;

struct MemsetThread {
    char *addr;
    size_t numpages;
    size_t hpagesize;
    QemuThread pgthread;
    sigjmp_buf env;
    MemsetContext *context;
};
typedef struct MemsetThread MemsetThread;

/* used by sigbus_handler() */
static MemsetContext *sigbus_memset_context;
struct sigaction sigbus_oldact;
static QemuMutex sigbus_mutex;

static QemuMutex page_mutex;
static QemuCond page_cond;

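/*
 * Return the host kernel's ID for the calling thread, falling back to
 * getpid() on platforms without a known per-thread ID syscall.
 */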
int qemu_get_thread_id(void)
{
#if defined(__linux__)
    return syscall(SYS_gettid);
#elif defined(__FreeBSD__)
    /* thread id is up to INT_MAX */
    long tid;
    thr_self(&tid);
    return (int)tid;
#elif defined(__NetBSD__)
    return _lwp_self();
#elif defined(__OpenBSD__)
    return getthrid();
#else
    return getpid();
#endif
}

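/* Detach from the controlling terminal and run in the background; see daemon(3) */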
int qemu_daemon(int nochdir, int noclose)
{
    return daemon(nochdir, noclose);
}

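/*
 * Create and lock the PID file, retrying when another process races us by
 * unlinking or recreating the path. On success the lock is held for the
 * lifetime of the process; on failure, false is returned and errp is set.
 */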
bool qemu_write_pidfile(const char *path, Error **errp)
{
    int fd;
    char pidstr[32];

    while (1) {
        struct stat a, b;
        struct flock lock = {
            .l_type = F_WRLCK,
            .l_whence = SEEK_SET,
            .l_len = 0,
        };

        fd = qemu_create(path, O_WRONLY, S_IRUSR | S_IWUSR, errp);
        if (fd == -1) {
            return false;
        }

        if (fstat(fd, &b) < 0) {
            error_setg_errno(errp, errno, "Cannot stat file");
            goto fail_close;
        }

        if (fcntl(fd, F_SETLK, &lock)) {
            error_setg_errno(errp, errno, "Cannot lock pid file");
            goto fail_close;
        }

        /*
         * Now make sure the path we locked is the same one that now
         * exists on the filesystem.
         */
        if (stat(path, &a) < 0) {
            /*
             * PID file disappeared, someone else must be racing with
             * us, so try again.
             */
            close(fd);
            continue;
        }

        if (a.st_ino == b.st_ino) {
            break;
        }

        /*
         * PID file was recreated, someone else must be racing with
         * us, so try again.
         */
        close(fd);
    }

    if (ftruncate(fd, 0) < 0) {
        error_setg_errno(errp, errno, "Failed to truncate pid file");
        goto fail_unlink;
    }

    snprintf(pidstr, sizeof(pidstr), FMT_pid "\n", getpid());
    if (qemu_write_full(fd, pidstr, strlen(pidstr)) != strlen(pidstr)) {
        error_setg(errp, "Failed to write pid file");
        goto fail_unlink;
    }

    return true;

fail_unlink:
    unlink(path);
fail_close:
    close(fd);
    return false;
}

/* Allocate anonymous memory pages, optionally shared and/or without swap reservation */
void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment, bool shared,
                          bool noreserve)
{
    const uint32_t qemu_map_flags = (shared ? QEMU_MAP_SHARED : 0) |
                                    (noreserve ? QEMU_MAP_NORESERVE : 0);
    size_t align = QEMU_VMALLOC_ALIGN;
    void *ptr = qemu_ram_mmap(-1, size, align, qemu_map_flags, 0);

    if (ptr == MAP_FAILED) {
        return NULL;
    }

    if (alignment) {
        *alignment = align;
    }

    trace_qemu_anon_ram_alloc(size, ptr);
    return ptr;
}

void qemu_anon_ram_free(void *ptr, size_t size)
{
    trace_qemu_anon_ram_free(ptr, size);
    qemu_ram_munmap(-1, ptr, size);
}

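/*
 * Toggle O_NONBLOCK on a socket fd via GLib: the try_ variant returns
 * -errno on failure, while the set_ variants assert or ignore errors.
 */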
void qemu_socket_set_block(int fd)
{
    g_unix_set_fd_nonblocking(fd, false, NULL);
}

int qemu_socket_try_set_nonblock(int fd)
{
    return g_unix_set_fd_nonblocking(fd, true, NULL) ? 0 : -errno;
}

void qemu_socket_set_nonblock(int fd)
{
    int f;
    f = qemu_socket_try_set_nonblock(fd);
    assert(f == 0);
}

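/* Enable SO_REUSEADDR so a listening address can be rebound right after close() */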
int socket_set_fast_reuse(int fd)
{
    int val = 1, ret;

    ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
                     (const char *)&val, sizeof(val));

    assert(ret == 0);

    return ret;
}

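/* Mark an fd close-on-exec so it is not leaked to child processes */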
void qemu_set_cloexec(int fd)
{
    int f;
    f = fcntl(fd, F_GETFD);
    assert(f != -1);
    f = fcntl(fd, F_SETFD, f | FD_CLOEXEC);
    assert(f != -1);
}

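/*
 * socketpair() wrapper that marks both ends close-on-exec: atomically
 * via SOCK_CLOEXEC where supported, otherwise via fcntl() afterwards.
 */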
int qemu_socketpair(int domain, int type, int protocol, int sv[2])
{
    int ret;

#ifdef SOCK_CLOEXEC
    ret = socketpair(domain, type | SOCK_CLOEXEC, protocol, sv);
    if (ret != -1 || errno != EINVAL) {
        return ret;
    }
#endif
    ret = socketpair(domain, type, protocol, sv);
    if (ret == 0) {
        qemu_set_cloexec(sv[0]);
        qemu_set_cloexec(sv[1]);
    }

    return ret;
}

char *
qemu_get_local_state_dir(void)
{
    return get_relocated_path(CONFIG_QEMU_LOCALSTATEDIR);
}

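/* Enable or disable echo (and canonical input processing) on a tty fd */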
void qemu_set_tty_echo(int fd, bool echo)
{
    struct termios tty;

    tcgetattr(fd, &tty);

    if (echo) {
        tty.c_lflag |= ECHO | ECHONL | ICANON | IEXTEN;
    } else {
        tty.c_lflag &= ~(ECHO | ECHONL | ICANON | IEXTEN);
    }

    tcsetattr(fd, TCSANOW, &tty);
}

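/*
 * SIGBUS handler installed while preallocating memory: a fault in one of
 * our memset threads jumps back into that thread via siglongjmp(); any
 * other SIGBUS is forwarded to the previously installed handler (on Linux)
 * or reported and ignored.
 */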
#ifdef CONFIG_LINUX
static void sigbus_handler(int signal, siginfo_t *siginfo, void *ctx)
#else /* CONFIG_LINUX */
static void sigbus_handler(int signal)
#endif /* CONFIG_LINUX */
{
    int i;

    if (sigbus_memset_context) {
        for (i = 0; i < sigbus_memset_context->num_threads; i++) {
            MemsetThread *thread = &sigbus_memset_context->threads[i];

            if (qemu_thread_is_self(&thread->pgthread)) {
                siglongjmp(thread->env, 1);
            }
        }
    }

#ifdef CONFIG_LINUX
    /*
     * We assume that the MCE SIGBUS handler could have been registered. We
     * should never receive BUS_MCEERR_AO on any of our threads, but only on
     * the main thread registered for PR_MCE_KILL_EARLY. Further, we should not
     * receive BUS_MCEERR_AR triggered by action of other threads on one of
     * our threads. So, no need to check for unrelated SIGBUS when seeing one
     * for our threads.
     *
     * We will forward to the MCE handler, which will either handle the SIGBUS
     * or reinstall the default SIGBUS handler and reraise the SIGBUS. The
     * default SIGBUS handler will crash the process, so we don't care.
     */
    if (sigbus_oldact.sa_flags & SA_SIGINFO) {
        sigbus_oldact.sa_sigaction(signal, siginfo, ctx);
        return;
    }
#endif /* CONFIG_LINUX */
    warn_report("qemu_prealloc_mem: unrelated SIGBUS detected and ignored");
}

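/*
 * Worker thread: fault in each page by rewriting its first byte in place.
 * A SIGBUS (e.g. hitting a hugetlbfs shortfall) lands back here through
 * sigsetjmp() and is reported as -EFAULT.
 */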
static void *do_touch_pages(void *arg)
{
    MemsetThread *memset_args = (MemsetThread *)arg;
    sigset_t set, oldset;
    int ret = 0;

    /*
     * On Linux, the page faults from the loop below can cause mmap_sem
     * contention with allocation of the thread stacks.  Do not start
     * clearing until all threads have been created.
     */
    qemu_mutex_lock(&page_mutex);
    while (!memset_args->context->all_threads_created) {
        qemu_cond_wait(&page_cond, &page_mutex);
    }
    qemu_mutex_unlock(&page_mutex);

    /* unblock SIGBUS */
    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

    if (sigsetjmp(memset_args->env, 1)) {
        ret = -EFAULT;
    } else {
        char *addr = memset_args->addr;
        size_t numpages = memset_args->numpages;
        size_t hpagesize = memset_args->hpagesize;
        size_t i;
        for (i = 0; i < numpages; i++) {
            /*
             * Read & write back the same value, so we don't
             * corrupt existing user/app data that might be
             * stored.
             *
             * 'volatile' to stop compiler optimizing this away
             * to a no-op
             */
            *(volatile char *)addr = *addr;
            addr += hpagesize;
        }
    }
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    return (void *)(uintptr_t)ret;
}

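/* Worker thread: populate the whole range with one MADV_POPULATE_WRITE call */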
static void *do_madv_populate_write_pages(void *arg)
{
    MemsetThread *memset_args = (MemsetThread *)arg;
    const size_t size = memset_args->numpages * memset_args->hpagesize;
    char * const addr = memset_args->addr;
    int ret = 0;

    /* See do_touch_pages(). */
    qemu_mutex_lock(&page_mutex);
    while (!memset_args->context->all_threads_created) {
        qemu_cond_wait(&page_cond, &page_mutex);
    }
    qemu_mutex_unlock(&page_mutex);

    if (size && qemu_madvise(addr, size, QEMU_MADV_POPULATE_WRITE)) {
        ret = -errno;
    }
    return (void *)(uintptr_t)ret;
}

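/*
 * Pick a preallocation thread count: bounded by the online CPUs,
 * MAX_MEM_PREALLOC_THREAD_COUNT, the caller's max_threads, the number of
 * pages, and roughly one thread per 64 MiB of memory.
 */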
static inline int get_memset_num_threads(size_t hpagesize, size_t numpages,
                                         int max_threads)
{
    long host_procs = sysconf(_SC_NPROCESSORS_ONLN);
    int ret = 1;

    if (host_procs > 0) {
        ret = MIN(MIN(host_procs, MAX_MEM_PREALLOC_THREAD_COUNT), max_threads);
    }

    /* Especially with gigantic pages, don't create more threads than pages. */
    ret = MIN(ret, numpages);
    /* Don't start threads to prealloc comparatively little memory. */
    ret = MIN(ret, MAX(1, hpagesize * numpages / (64 * MiB)));

    /* In case sysconf() fails, we fall back to single threaded */
    return ret;
}

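/*
 * Split the range across worker threads, release them together once all
 * have been created (see do_touch_pages()), then join them and return a
 * nonzero error if any thread failed.
 */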
static int touch_all_pages(char *area, size_t hpagesize, size_t numpages,
                           int max_threads, ThreadContext *tc,
                           bool use_madv_populate_write)
{
    static gsize initialized = 0;
    MemsetContext context = {
        .num_threads = get_memset_num_threads(hpagesize, numpages, max_threads),
    };
    size_t numpages_per_thread, leftover;
    void *(*touch_fn)(void *);
    int ret = 0, i = 0;
    char *addr = area;

    if (g_once_init_enter(&initialized)) {
        qemu_mutex_init(&page_mutex);
        qemu_cond_init(&page_cond);
        g_once_init_leave(&initialized, 1);
    }

    if (use_madv_populate_write) {
        /* With a single thread, call madvise() directly instead of spawning one */
        if (context.num_threads == 1) {
            if (qemu_madvise(area, hpagesize * numpages,
                             QEMU_MADV_POPULATE_WRITE)) {
                return -errno;
            }
            return 0;
        }
        touch_fn = do_madv_populate_write_pages;
    } else {
        touch_fn = do_touch_pages;
    }

    context.threads = g_new0(MemsetThread, context.num_threads);
    numpages_per_thread = numpages / context.num_threads;
    leftover = numpages % context.num_threads;
    for (i = 0; i < context.num_threads; i++) {
        context.threads[i].addr = addr;
        context.threads[i].numpages = numpages_per_thread + (i < leftover);
        context.threads[i].hpagesize = hpagesize;
        context.threads[i].context = &context;
        if (tc) {
            thread_context_create_thread(tc, &context.threads[i].pgthread,
                                         "touch_pages",
                                         touch_fn, &context.threads[i],
                                         QEMU_THREAD_JOINABLE);
        } else {
            qemu_thread_create(&context.threads[i].pgthread, "touch_pages",
                               touch_fn, &context.threads[i],
                               QEMU_THREAD_JOINABLE);
        }
        addr += context.threads[i].numpages * hpagesize;
    }

    if (!use_madv_populate_write) {
        sigbus_memset_context = &context;
    }

    qemu_mutex_lock(&page_mutex);
    context.all_threads_created = true;
    qemu_cond_broadcast(&page_cond);
    qemu_mutex_unlock(&page_mutex);

    for (i = 0; i < context.num_threads; i++) {
        int tmp = (uintptr_t)qemu_thread_join(&context.threads[i].pgthread);

        if (tmp) {
            ret = tmp;
        }
    }

    if (!use_madv_populate_write) {
        sigbus_memset_context = NULL;
    }
    g_free(context.threads);

    return ret;
}

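/* Probe whether MADV_POPULATE_WRITE works on this mapping; EINVAL means it doesn't */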
static bool madv_populate_write_possible(char *area, size_t pagesize)
{
    return !qemu_madvise(area, pagesize, QEMU_MADV_POPULATE_WRITE) ||
           errno != EINVAL;
}

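/*
 * Preallocate (fault in) sz bytes starting at area, preferring
 * MADV_POPULATE_WRITE and falling back to touching each page with a
 * temporary SIGBUS handler installed under sigbus_mutex.
 */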
void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads,
                       ThreadContext *tc, Error **errp)
{
    static gsize initialized;
    int ret;
    size_t hpagesize = qemu_fd_getpagesize(fd);
    size_t numpages = DIV_ROUND_UP(sz, hpagesize);
    bool use_madv_populate_write;
    struct sigaction act;

    /*
     * Sense on every invocation, as MADV_POPULATE_WRITE cannot be used for
     * some special mappings, such as mapping /dev/mem.
     */
    use_madv_populate_write = madv_populate_write_possible(area, hpagesize);

    if (!use_madv_populate_write) {
        if (g_once_init_enter(&initialized)) {
            qemu_mutex_init(&sigbus_mutex);
            g_once_init_leave(&initialized, 1);
        }

        qemu_mutex_lock(&sigbus_mutex);
        memset(&act, 0, sizeof(act));
#ifdef CONFIG_LINUX
        act.sa_sigaction = &sigbus_handler;
        act.sa_flags = SA_SIGINFO;
#else /* CONFIG_LINUX */
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;
#endif /* CONFIG_LINUX */

        ret = sigaction(SIGBUS, &act, &sigbus_oldact);
        if (ret) {
            qemu_mutex_unlock(&sigbus_mutex);
            error_setg_errno(errp, errno,
                "qemu_prealloc_mem: failed to install signal handler");
            return;
        }
    }

    /* touch pages simultaneously */
    ret = touch_all_pages(area, hpagesize, numpages, max_threads, tc,
                          use_madv_populate_write);
    if (ret) {
        error_setg_errno(errp, -ret,
                         "qemu_prealloc_mem: preallocating memory failed");
    }

    if (!use_madv_populate_write) {
        ret = sigaction(SIGBUS, &sigbus_oldact, NULL);
        if (ret) {
            /* Terminate QEMU since it can't recover from error */
            perror("qemu_prealloc_mem: failed to reinstall signal handler");
            exit(1);
        }
        qemu_mutex_unlock(&sigbus_mutex);
    }
}

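/* Return the name of a process by PID (caller g_free()s), or NULL if unknown */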
char *qemu_get_pid_name(pid_t pid)
{
    char *name = NULL;

#if defined(__FreeBSD__)
    /* BSDs don't have /proc, but they provide a nice substitute */
    struct kinfo_proc *proc = kinfo_getproc(pid);

    if (proc) {
        name = g_strdup(proc->ki_comm);
        free(proc);
    }
#else
    /* Assume a system with reasonable procfs */
    char *pid_path;
    size_t len;

    pid_path = g_strdup_printf("/proc/%d/cmdline", pid);
    g_file_get_contents(pid_path, &name, &len, NULL);
    g_free(pid_path);
#endif

    return name;
}

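/*
 * mmap() a stack of at least *sz bytes plus one PROT_NONE guard page,
 * updating *sz to the full mapped size. Aborts on allocation failure.
 */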
void *qemu_alloc_stack(size_t *sz)
{
    void *ptr, *guardpage;
    int flags;
#ifdef CONFIG_DEBUG_STACK_USAGE
    void *ptr2;
#endif
    size_t pagesz = qemu_real_host_page_size();
#ifdef _SC_THREAD_STACK_MIN
    /* avoid stacks smaller than _SC_THREAD_STACK_MIN */
    long min_stack_sz = sysconf(_SC_THREAD_STACK_MIN);
    *sz = MAX(MAX(min_stack_sz, 0), *sz);
#endif
    /* adjust stack size to a multiple of the page size */
    *sz = ROUND_UP(*sz, pagesz);
    /* allocate one extra page for the guard page */
    *sz += pagesz;

    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(MAP_STACK) && defined(__OpenBSD__)
    /* Only enable MAP_STACK on OpenBSD. Other OSes such as
     * Linux/FreeBSD/NetBSD have a flag with the same name
     * but differing functionality. OpenBSD will SEGV
     * if it spots execution with a stack pointer pointing
     * at memory that was not allocated with MAP_STACK.
     */
    flags |= MAP_STACK;
#endif

    ptr = mmap(NULL, *sz, PROT_READ | PROT_WRITE, flags, -1, 0);
    if (ptr == MAP_FAILED) {
        perror("failed to allocate memory for stack");
        abort();
    }

#if defined(HOST_IA64)
    /* separate register stack */
    guardpage = ptr + (((*sz - pagesz) / 2) & ~pagesz);
#elif defined(HOST_HPPA)
    /* stack grows up */
    guardpage = ptr + *sz - pagesz;
#else
    /* stack grows down */
    guardpage = ptr;
#endif
    if (mprotect(guardpage, pagesz, PROT_NONE) != 0) {
        perror("failed to set up stack guard page");
        abort();
    }

#ifdef CONFIG_DEBUG_STACK_USAGE
    for (ptr2 = ptr + pagesz; ptr2 < ptr + *sz; ptr2 += sizeof(uint32_t)) {
        *(uint32_t *)ptr2 = 0xdeadbeaf;
    }
#endif

    return ptr;
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static __thread unsigned int max_stack_usage;
#endif

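/*
 * Free a stack from qemu_alloc_stack(). With CONFIG_DEBUG_STACK_USAGE,
 * first scan for the deepest overwritten 0xdeadbeaf fill to report the
 * peak stack usage of this thread.
 */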
void qemu_free_stack(void *stack, size_t sz)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
    unsigned int usage;
    void *ptr;

    for (ptr = stack + qemu_real_host_page_size(); ptr < stack + sz;
         ptr += sizeof(uint32_t)) {
        if (*(uint32_t *)ptr != 0xdeadbeaf) {
            break;
        }
    }
    usage = sz - (uintptr_t) (ptr - stack);
    if (usage > max_stack_usage) {
        error_report("thread %d max stack usage increased from %u to %u",
                     qemu_get_thread_id(), max_stack_usage, usage);
        max_stack_usage = usage;
    }
#endif

    munmap(stack, sz);
}

/*
 * Disable CFI checks.
 * We are going to call a signal handler directly. Such a handler may or may
 * not have been defined in our binary, so there's no guarantee that the
 * pointer used to set the handler is a cfi-valid pointer. Since the handlers
 * are stored in kernel memory, changing the handler to an attacker-defined
 * function requires being able to call a sigaction() syscall,
 * which is not as easy as overwriting a pointer in memory.
 */
QEMU_DISABLE_CFI
void sigaction_invoke(struct sigaction *action,
                      struct qemu_signalfd_siginfo *info)
{
    siginfo_t si = {};
    si.si_signo = info->ssi_signo;
    si.si_errno = info->ssi_errno;
    si.si_code = info->ssi_code;

    /* Convert the minimal set of fields defined by POSIX.
     * Positive si_code values are reserved for kernel-generated
     * signals, where the valid siginfo fields are determined by
     * the signal number.  But according to POSIX, it is unspecified
     * whether SI_USER and SI_QUEUE have values less than or equal to
     * zero.
     */
    if (info->ssi_code == SI_USER || info->ssi_code == SI_QUEUE ||
        info->ssi_code <= 0) {
        /* SIGTERM, etc.  */
        si.si_pid = info->ssi_pid;
        si.si_uid = info->ssi_uid;
    } else if (info->ssi_signo == SIGILL || info->ssi_signo == SIGFPE ||
               info->ssi_signo == SIGSEGV || info->ssi_signo == SIGBUS) {
        si.si_addr = (void *)(uintptr_t)info->ssi_addr;
    } else if (info->ssi_signo == SIGCHLD) {
        si.si_pid = info->ssi_pid;
        si.si_status = info->ssi_status;
        si.si_uid = info->ssi_uid;
    }
    action->sa_sigaction(info->ssi_signo, &si, NULL);
}

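/* Return the host's physical memory size in bytes (clamped to SIZE_MAX), or 0 */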
size_t qemu_get_host_physmem(void)
{
#ifdef _SC_PHYS_PAGES
    long pages = sysconf(_SC_PHYS_PAGES);
    if (pages > 0) {
        if (pages > SIZE_MAX / qemu_real_host_page_size()) {
            return SIZE_MAX;
        } else {
            return pages * qemu_real_host_page_size();
        }
    }
#endif
    return 0;
}

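/* msync() wrapper that expands addr/length to host page boundaries first */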
int qemu_msync(void *addr, size_t length, int fd)
{
    size_t align_mask = ~(qemu_real_host_page_size() - 1);

    /*
     * There are no strict requirements on the length of the mapping to be
     * synced, but the length must grow in step with the address alignment
     * below, and the result must be rounded up to a multiple of the host
     * page size.
     */
    length += ((uintptr_t)addr & (qemu_real_host_page_size() - 1));
    length = (length + ~align_mask) & align_mask;

    addr = (void *)((uintptr_t)addr & align_mask);

    return msync(addr, length, MS_SYNC);
}