qemu/linux-user/signal.c
/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "trace.h"
#include "signal-common.h"

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
static uint8_t target_to_host_signal_table[_NSIG];

int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}

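/* Guest sigset helpers: guest signal numbers are 1-based, so convert to
 * a 0-based bit index into the abi_ulong words of a target_sigset_t.
 */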
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

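/* Block all host signals for this thread and mark a guest signal as
 * pending. Returns the previous value of signal_pending: non-zero means
 * a signal was already pending and the caller must restart its syscall.
 */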
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* sigaltstack management */

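/* Note the unsigned comparison: if sp is below ss_sp, the subtraction
 * wraps around and the result is larger than ss_size, so the test
 * correctly reports "not on the signal stack".
 */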
int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

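/* Return SS_DISABLE if no alternate stack is configured, SS_ONSTACK if
 * sp currently points into it, and 0 otherwise.
 */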
int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}

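/* Fill in a guest stack_t from the task's current sigaltstack state;
 * used by target-specific code when building a signal frame's ucontext.
 */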
void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we only support the case where POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

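/* Returns 1 if we should install our own host handler for this signal
 * because its default action would terminate the guest; signals that are
 * ignored or stop/continue the process by default return 0.
 */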
static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for (i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0) {
            host_to_target_signal_table[i] = i;
        }
    }
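    /* Build the inverse mapping so target_to_host_signal() can use a
     * table lookup too.
     */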
    for (i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* Set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef TARGET_GPROF
        if (i == SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case.  */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We cannot just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}

#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* We already dumped the core of the target process; we don't
         * want a coredump of QEMU itself. */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here we install the
     * default signal handler, send ourselves the signal and wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* Queue a signal so that it will be sent to the virtual CPU as soon
   as possible. */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

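    /* Record which union fields are valid in the top 16 bits of si_code,
     * just as host_to_target_siginfo_noswap() does for host-generated
     * siginfo; tswap_siginfo() strips this out again later.
     */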
    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

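/* Host signal handler: runs with all host signals blocked (see the
 * sa_mask set up in signal_init()). It records the signal in the
 * per-thread emulated sigtable and forces the main loop out of the
 * guest code as soon as possible.
 */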
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* The CPU emulator uses some host signals to detect exceptions,
       so we forward those to it. */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc)) {
            return;
        }
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until the target signal handler is entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    /* XXX: test errors */
    if (uoss_addr) {
        __put_user(ts->sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(ts->sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if (uss_addr) {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp)) {
            goto out;
        }

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0) {
            goto out;
        }

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        ts->sigaltstack_used.ss_sp = ss.ss_sp;
        ts->sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss))) {
            goto out;
        }
    }

    ret = 0;
out:
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG ||
        sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

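/* Deliver one pending guest signal: either perform the default action
 * here (stop, ignore, or dump core and abort) or set up a signal frame
 * on the guest stack so the guest's handler runs when the CPU loop
 * resumes.
 */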
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* Default handler: ignore some signals. The others are either
         * job control or fatal. */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

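/* Drain pending signals for this thread: the synchronous signal slot
 * first (it must not remain blocked or ignored), then any unblocked
 * asynchronous signals, repeating until nothing is pending.
 */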
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced; see force_sig_info() and
             * callers in Linux.
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* If no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}