qemu/linux-user/signal.c
/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "exec/gdbstub.h"
#include "hw/core/tcg-cpu-ops.h"

#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "loader.h"
#include "trace.h"
#include "signal-common.h"
#include "host-signal.h"
#include "safe-syscall.h"

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/* Fallback addresses into sigtramp page. */
abi_ulong default_sigreturn;
abi_ulong default_rt_sigreturn;

/*
 * System includes define _NSIG as SIGRTMAX + 1, but QEMU (like the
 * kernel) defines TARGET_NSIG as TARGET_SIGRTMAX, and the first signal
 * is SIGHUP, defined as 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
#ifdef __SIGRTMAX
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
};

static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];

/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
    if (sig < 1 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
    if (sig < 1 || sig > TARGET_NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}

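/*
 * Guest-side equivalents of sigaddset()/sigismember().  Guest signal
 * numbers are 1-based, so signal N lives in bit (N - 1) % TARGET_NSIG_BPW
 * of word (N - 1) / TARGET_NSIG_BPW (e.g. on a 64-bit ABI, signal 65
 * is bit 0 of word 1).
 */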
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

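/*
 * The _internal conversion variants below work on sigsets in host byte
 * order; the plain host_to_target_sigset()/target_to_host_sigset()
 * additionally byte-swap each word with tswapal() so the result can be
 * copied directly to or from guest memory.
 */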
void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;
    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;
    sigemptyset(d);
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

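/*
 * Old-style (pre-rt) sigsets are a single abi_ulong, i.e. just the
 * first word of a full target_sigset_t.
 */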
void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

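/*
 * Block all host signals and mark a signal as pending for this thread.
 * Returns the previous value of ts->signal_pending, i.e. non-zero if a
 * signal was already pending, in which case the caller typically needs
 * to restart its syscall with -TARGET_ERESTARTSYS.
 */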
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and
 * oldset are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS
 * if a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* sigaltstack management */

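/*
 * Return true if sp points into the guest's registered sigaltstack.
 * The unsigned subtraction means an sp below ss_sp wraps around and
 * compares greater than ss_size, so a single comparison suffices.
 */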
int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status = info->si_status;
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we support only POSIX RT signals here. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

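/*
 * Return 1 if the default action for this signal terminates the process
 * (so signal_init() must intercept it on the host side); return 0 for
 * signals that are ignored by default or are job control signals.
 */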
static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

static void signal_table_init(void)
{
    int host_sig, target_sig, count;

    /*
     * Signals are supported starting from TARGET_SIGRTMIN and going up
     * until we run out of host realtime signals.  glibc at least uses
     * only the lower 2 rt signals and probably nobody's using the upper
     * ones.  This is why SIGRTMIN (34) is generally greater than
     * __SIGRTMIN (32).  To fix this properly we would need to do manual
     * signal delivery multiplexed over a single host signal.
     * Attempts to configure "missing" signals via sigaction will be
     * silently ignored.
     */
    for (host_sig = SIGRTMIN; host_sig <= SIGRTMAX; host_sig++) {
        target_sig = host_sig - SIGRTMIN + TARGET_SIGRTMIN;
        if (target_sig <= TARGET_NSIG) {
            host_to_target_signal_table[host_sig] = target_sig;
        }
    }

    /* generate signal conversion tables */
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        target_to_host_signal_table[target_sig] = _NSIG; /* poison */
    }
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        if (host_to_target_signal_table[host_sig] == 0) {
            host_to_target_signal_table[host_sig] = host_sig;
        }
        target_sig = host_to_target_signal_table[host_sig];
        if (target_sig <= TARGET_NSIG) {
            target_to_host_signal_table[target_sig] = host_sig;
        }
    }

    if (trace_event_get_state_backends(TRACE_SIGNAL_TABLE_INIT)) {
        for (target_sig = 1, count = 0; target_sig <= TARGET_NSIG; target_sig++) {
            if (target_to_host_signal_table[target_sig] == _NSIG) {
                count++;
            }
        }
        trace_signal_table_init(count);
    }
}

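/*
 * Per-thread signal setup: initialize the conversion tables, seed the
 * guest signal mask from the host's, record the host's initial
 * dispositions in sigact_table, and install host_signal_handler for
 * every default-fatal signal so that host signals can be turned into
 * guest ones.
 */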
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i;
    int host_sig;

    /* initialize signal conversion tables */
    signal_table_init();

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef CONFIG_GPROF
        if (i == TARGET_SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case.  */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We cannot just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/*
 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
 * 'force' part is handled in process_pending_signals().
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = code;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, sig, QEMU_SI_FAULT, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

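/*
 * Entry points used by the TCG memory helpers when a guest access
 * faults: give the target a chance to record CPU state for the fault
 * via the record_sigsegv/record_sigbus hooks, queue the corresponding
 * synchronous guest signal, and unwind out of the CPU loop.
 */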
void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
    }

    force_sig_fault(TARGET_SIGSEGV,
                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
                    addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                          MMUAccessType access_type, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
    }

    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of QEMU itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
     * of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* Queue a signal so that it will be sent to the virtual CPU as soon
   as possible. */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

/* Adjust the signal context to rewind out of safe-syscall if we're in it */
static inline void rewind_if_in_safe_syscall(void *puc)
{
#ifdef HAVE_SAFE_SYSCALL
    ucontext_t *uc = (ucontext_t *)puc;
    uintptr_t pcreg = host_signal_pc(uc);

    if (pcreg > (uintptr_t)safe_syscall_start
        && pcreg < (uintptr_t)safe_syscall_end) {
        host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
    }
#endif
}

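/*
 * Host-side signal handler. Synchronous, non-spoofed SIGSEGV and SIGBUS
 * are handled specially: they are unwound to the faulting instruction
 * and delivered immediately via the cpu_loop_exit paths. Everything
 * else is recorded in ts->sigtab, and host signals stay blocked until
 * process_pending_signals() delivers the guest signal.
 */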
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding.
     */
    if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) {
        MMUAccessType access_type;
        uintptr_t host_addr;
        abi_ptr guest_addr;
        bool is_write;

        host_addr = (uintptr_t)info->si_addr;

        /*
         * Convert forcefully to guest address space: addresses outside
         * reserved_va are still valid to report via SEGV_MAPERR.
         */
        guest_addr = h2g_nocheck(host_addr);

        pc = host_signal_pc(uc);
        is_write = host_signal_write(info, uc);
        access_type = adjust_signal_pc(&pc, is_write);

        if (host_sig == SIGSEGV) {
            bool maperr = true;

            if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
                /* If this was a write to a TB protected page, restart. */
                if (is_write &&
                    handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,
                                                pc, guest_addr)) {
                    return;
                }

                /*
                 * With reserved_va, the whole address space is PROT_NONE,
                 * which means that we may get ACCERR when we want MAPERR.
                 */
                if (page_get_flags(guest_addr) & PAGE_VALID) {
                    maperr = false;
                } else {
                    info->si_code = SEGV_MAPERR;
                }
            }

            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
            cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
        } else {
            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
            if (info->si_code == BUS_ADRALN) {
                cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
            }
        }

        sync_sig = true;
    }

    /* get target signal number */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until the target signal handler is entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        ret = target_restore_altstack(uss, env);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

 out:
    if (uoss) {
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * We don't return an error here, because some programs try to
             * register a handler for all possible rt signals even if they
             * don't need it.  An error here could abort them, whereas there
             * is no harm in the signal simply being unavailable later.
             * This is the case for golang,
             *   See https://github.com/golang/go/issues/33746
             * So we silently ignore the error.
             */
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

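/*
 * Deliver one pending guest signal: give gdb a chance to intercept it,
 * then either perform the default action (stop, ignore, or dump core)
 * or set up the guest signal frame with the handler's sa_mask in force.
 */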
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals; the others are either
           job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

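/*
 * Main-loop helper: while signals are pending, deliver the synchronous
 * signal first and then any queued asynchronous signals, rescanning
 * after each delivery, then restore the guest's signal mask (keeping
 * SIGSEGV and SIGBUS unblocked for QEMU's own use).
 */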
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced; see force_sig_info() and
             * its callers in Linux.
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* If no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}
1206