1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20#include "qemu/bitops.h"
21#include <sys/ucontext.h>
22#include <sys/resource.h>
23
24#include "qemu.h"
25#include "qemu-common.h"
26#include "trace.h"
27#include "signal-common.h"
28
/* Alternate signal stack (sigaltstack(2)) state for the target process.
 * Starts out disabled; updated by do_sigaltstack() below. */
struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};
34
/* Target-visible signal dispositions, indexed by target signal number - 1. */
static struct target_sigaction sigact_table[TARGET_NSIG];

/* All host signals that we trap funnel through this single handler. */
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);
39
/* Host -> target signal number mapping.  Entries left at 0 here are
 * filled in with the identity mapping by signal_init(). */
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,

    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,

    /* The first host real-time signal is swapped with the last one.
     * NOTE(review): presumably so the lowest guest RT signals do not
     * collide with host RT signals reserved by libpthread — confirm
     * against upstream history before relying on this. */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
/* Reverse of the table above; built by signal_init(). */
static uint8_t target_to_host_signal_table[_NSIG];
84
85int host_to_target_signal(int sig)
86{
87 if (sig < 0 || sig >= _NSIG)
88 return sig;
89 return host_to_target_signal_table[sig];
90}
91
92int target_to_host_signal(int sig)
93{
94 if (sig < 0 || sig >= _NSIG)
95 return sig;
96 return target_to_host_signal_table[sig];
97}
98
99static inline void target_sigaddset(target_sigset_t *set, int signum)
100{
101 signum--;
102 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
103 set->sig[signum / TARGET_NSIG_BPW] |= mask;
104}
105
106static inline int target_sigismember(const target_sigset_t *set, int signum)
107{
108 signum--;
109 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
110 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
111}
112
113void host_to_target_sigset_internal(target_sigset_t *d,
114 const sigset_t *s)
115{
116 int i;
117 target_sigemptyset(d);
118 for (i = 1; i <= TARGET_NSIG; i++) {
119 if (sigismember(s, i)) {
120 target_sigaddset(d, host_to_target_signal(i));
121 }
122 }
123}
124
125void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
126{
127 target_sigset_t d1;
128 int i;
129
130 host_to_target_sigset_internal(&d1, s);
131 for(i = 0;i < TARGET_NSIG_WORDS; i++)
132 d->sig[i] = tswapal(d1.sig[i]);
133}
134
135void target_to_host_sigset_internal(sigset_t *d,
136 const target_sigset_t *s)
137{
138 int i;
139 sigemptyset(d);
140 for (i = 1; i <= TARGET_NSIG; i++) {
141 if (target_sigismember(s, i)) {
142 sigaddset(d, target_to_host_signal(i));
143 }
144 }
145}
146
147void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
148{
149 target_sigset_t s1;
150 int i;
151
152 for(i = 0;i < TARGET_NSIG_WORDS; i++)
153 s1.sig[i] = tswapal(s->sig[i]);
154 target_to_host_sigset_internal(d, &s1);
155}
156
157void host_to_target_old_sigset(abi_ulong *old_sigset,
158 const sigset_t *sigset)
159{
160 target_sigset_t d;
161 host_to_target_sigset(&d, sigset);
162 *old_sigset = d.sig[0];
163}
164
165void target_to_host_old_sigset(sigset_t *sigset,
166 const abi_ulong *old_sigset)
167{
168 target_sigset_t d;
169 int i;
170
171 d.sig[0] = *old_sigset;
172 for(i = 1;i < TARGET_NSIG_WORDS; i++)
173 d.sig[i] = 0;
174 target_to_host_sigset(sigset, &d);
175}
176
/* Block all host signals, then atomically mark a signal as pending and
 * return whether one was already pending.  A non-zero return means a
 * signal arrived first; callers typically restart the syscall
 * (-TARGET_ERESTARTSYS).  Blocking everything first guarantees
 * host_signal_handler() cannot run between the xchg and whatever
 * non-interruptible work the caller does next. */
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* Block everything; the mask is restored by process_pending_signals(). */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    /* Atomically swap in "pending" and report the previous state. */
    return atomic_xchg(&ts->signal_pending, 1);
}
191
192
193
194
195
196
197
198
/* Emulated sigprocmask(2) operating on the task's *guest* signal mask.
 * @how is one of the host SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK values.
 * Returns 0 on success or -TARGET_ERESTARTSYS if a signal raced in
 * while we were trying to update the mask.  SIGKILL and SIGSTOP can
 * never be masked, matching kernel behaviour. */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    /* Report the old mask before any modification. */
    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        /* Take the "all host signals blocked" lock so the update of
         * signal_mask is atomic w.r.t. host_signal_handler(). */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* SIGKILL and SIGSTOP are unmaskable. */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
238
#if !defined(TARGET_NIOS2)
/* Overwrite the guest signal mask wholesale.  Callers must already
 * hold signals blocked (e.g. via block_signals()) — this helper does
 * no locking of its own.  NOTE(review): the NIOS2 exclusion is
 * presumably because that target provides its own definition —
 * confirm. */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif
250
251
252
253int on_sig_stack(unsigned long sp)
254{
255 return (sp - target_sigaltstack_used.ss_sp
256 < target_sigaltstack_used.ss_size);
257}
258
259int sas_ss_flags(unsigned long sp)
260{
261 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
262 : on_sig_stack(sp) ? SS_ONSTACK : 0);
263}
264
265abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
266{
267
268
269
270 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
271 return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
272 }
273 return sp;
274}
275
/* Write the current sigaltstack state into the guest-memory stack_t
 * @uss (byte-swapped by __put_user), deriving ss_flags from the
 * guest's current stack pointer. */
void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    __put_user(target_sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(target_sigaltstack_used.ss_size, &uss->ss_size);
}
282
283
284
/* Translate a host siginfo_t into the target layout, leaving all
 * fields in host byte order; tswap_siginfo() performs the swap later.
 * To tell tswap_siginfo() which union member is live, a QEMU_SI_*
 * type tag is deposited into the top 16 bits of si_code — the guest
 * never sees the tag because tswap_siginfo() strips it. */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* Zero the union padding so no host stack bytes leak to the guest. */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* Decide which union member the host kernel filled in: first by
     * si_code, then (for kernel-generated codes) by signal number. */
    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent by kill()/tkill() or synthesised by the kernel:
         * only the sender's pid/uid are meaningful. */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Kernel-generated code: the layout depends on the signal. */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/rt-signal payload. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* Truncating a host pointer to abi_ulong; NOTE(review):
             * relies on sival_ptr round-tripping through the guest
             * width — confirm for 64-bit-host/32-bit-guest setups. */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    /* Stash the union tag in the upper half of si_code. */
    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
361
/* Byte-swap a target_siginfo_t produced by host_to_target_siginfo_noswap()
 * into guest memory order.  The QEMU_SI_* tag hidden in the upper 16
 * bits of si_code selects which union member to swap; only the
 * sign-extended low 16 bits of si_code reach the guest. */
void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* Swap only the fields belonging to the live union member. */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        /* Every producer must have set a valid tag. */
        g_assert_not_reached();
    }
}
419
420void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
421{
422 target_siginfo_t tgt_tmp;
423 host_to_target_siginfo_noswap(&tgt_tmp, info);
424 tswap_siginfo(tinfo, &tgt_tmp);
425}
426
427
428
/* Convert a guest siginfo back to host form.  Only the _rt union
 * member is converted; NOTE(review): this appears to assume callers
 * only pass sigqueue-style payloads (e.g. rt_sigqueueinfo) — confirm
 * against the call sites. */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* Fetch the guest-width pointer value first, then widen it to the
     * host pointer size. */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}
444
445static int fatal_signal (int sig)
446{
447 switch (sig) {
448 case TARGET_SIGCHLD:
449 case TARGET_SIGURG:
450 case TARGET_SIGWINCH:
451
452 return 0;
453 case TARGET_SIGCONT:
454 case TARGET_SIGSTOP:
455 case TARGET_SIGTSTP:
456 case TARGET_SIGTTIN:
457 case TARGET_SIGTTOU:
458
459 return 0;
460 default:
461 return 1;
462 }
463}
464
465
466static int core_dump_signal(int sig)
467{
468 switch (sig) {
469 case TARGET_SIGABRT:
470 case TARGET_SIGFPE:
471 case TARGET_SIGILL:
472 case TARGET_SIGQUIT:
473 case TARGET_SIGSEGV:
474 case TARGET_SIGTRAP:
475 case TARGET_SIGBUS:
476 return (1);
477 default:
478 return (0);
479 }
480}
481
/* One-time signal-emulation setup: complete the signal number mapping
 * tables, record the inherited dispositions, and install the host
 * handler for every signal whose default action is fatal. */
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* Fill unset host->target entries with the identity mapping... */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    /* ...then build the reverse (target->host) table. */
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Seed the guest signal mask from the inherited host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        /* Preserve dispositions inherited across exec (SIG_IGN/SIG_DFL)
         * in the guest-visible table. */
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* Trap only signals whose default action would kill us, so
         * that QEMU — not the host kernel — decides the guest's fate;
         * non-fatal signals keep their host default for now. */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}
528
529
530
531
532
/* Queue target signal @sig against the current CPU with a synthetic
 * kernel-origin (SI_KERNEL) siginfo, as the kernel does for forced
 * signals. */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}
546
547
548
549
550
#if !defined(TARGET_RISCV)
/* Force a SIGSEGV on the guest, typically because delivering @oldsig
 * failed (e.g. the signal frame could not be written).  If the failed
 * signal was itself SIGSEGV, reset its disposition to default so the
 * forced signal terminates the process instead of recursing.
 * NOTE(review): the RISCV exclusion presumably means that target
 * supplies its own implementation — confirm. */
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}

#endif
564
565
/* Terminate the process the way the guest kernel would for fatal
 * target signal @target_sig: optionally write a guest core dump, then
 * kill ourselves with the corresponding host signal so the parent sees
 * the correct wait status.  Never returns. */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* Dump a guest-format core if this signal calls for one and the
     * binary loader registered a dumper. */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* We already wrote the guest core; stop the host kernel from
         * overwriting it with a (useless) host-format core. */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur=0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* Restore the default host handler and re-raise, so the process
     * dies with the real signal and the parent's waitpid() status is
     * accurate. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    kill(getpid(), host_sig);

    /* The signal may be blocked or delivered asynchronously: unblock
     * just it and wait for delivery. */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* Should be unreachable; abort() as a last resort. */
    abort();
}
618
619
620
/* Queue a *synchronous* signal (one raised by the current instruction,
 * e.g. a fault) for the current task.  There is a single sync-signal
 * slot, so a second call before delivery overwrites the first.  The
 * QEMU_SI_* @si_type tag is deposited into si_code for tswap_siginfo().
 * Always returns 1. */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* Flag pending *after* the slot is fully written. */
    atomic_set(&ts->signal_pending, 1);
    return 1;
}
637
#ifndef HAVE_SAFE_SYSCALL
/* No-op stub for hosts without the safe-syscall assembly fragment;
 * the real implementation rewinds the PC so an interrupted safe
 * syscall is restarted. */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif
644
/* Host signal handler — runs in host signal context, so it must only
 * do async-signal-safe work: translate the siginfo, record it in the
 * per-task table, and kick the CPU loop; actual delivery happens later
 * in process_pending_signals(). */
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* A synchronous SEGV/BUS (si_code > 0) may be a guest memory
     * access that cpu_signal_handler() can resolve (e.g. self-modifying
     * code); if so we are done. */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* Drop signals with no guest-visible equivalent. */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    /* Record the pending signal for later delivery. */
    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* On return the kernel restores the context's signal mask: force
     * it to "everything blocked except SEGV/BUS" so no further guest
     * signals interrupt us until process_pending_signals() runs.
     * SEGV/BUS stay unblocked because the emulator itself relies on
     * catching them. */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* Break out of the CPU execution loop promptly. */
    cpu_exit(thread_cpu);
}
700
701
702
/* Emulated sigaltstack(2).  @uss_addr/@uoss_addr are guest addresses
 * of the new and old stack_t (either may be 0); @sp is the guest's
 * current stack pointer, used for the on-stack checks.  Returns 0 or a
 * negative TARGET_E* errno. */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* Snapshot the old state first, before any update below. */
    if(uoss_addr)
    {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELFv2 ABI mandates a larger minimum. */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        /* Cannot change the stack while executing on it. */
        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        /* Only SS_DISABLE, SS_ONSTACK or 0 are valid flag values. */
        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            /* An enabled stack must meet the minimum size. */
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    /* Copy the previously-snapshotted old state out to the guest. */
    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}
773
774
/* Emulated sigaction(2).  Updates the guest-visible disposition table
 * and mirrors the change into a real host sigaction where needed.
 * Returns 0, -TARGET_EINVAL for bad/unmodifiable signals, or
 * -TARGET_ERESTARTSYS if a signal raced with the update. */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    /* KILL and STOP dispositions cannot be changed, as per POSIX. */
    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    /* Make the table update atomic w.r.t. host_signal_handler(). */
    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* sa_mask is copied as-is (already in guest order). */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        k->sa_mask = act->sa_mask;

        /* Mirror into the host, except for SEGV/BUS which must always
         * stay trapped by host_signal_handler for the emulator's own
         * fault handling. */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* SIG_IGN passes straight through; SIG_DFL only when the
             * default action is non-fatal (fatal defaults are emulated
             * via host_signal_handler / dump_core_and_abort). */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
836
/* Deliver one pending guest signal @sig from table entry @k: apply the
 * default action (stop/ignore/abort) or build a signal frame on the
 * guest stack for a registered handler.  Called from
 * process_pending_signals() with all host signals blocked. */
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* Consume the table entry before anything can re-raise it. */
    k->pending = 0;

    /* gdb may swallow the signal (returns 0) or replace it. */
    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* Default actions: job-control signals stop the process; the
         * default-ignored ones do nothing; everything else is fatal. */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(),SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* Registered guest handler: compute the mask to block during
         * the handler and build a signal frame. */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* Unless SA_NODEFER, the signal itself is blocked while its
         * handler runs. */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* The mask saved in the frame is the one to restore on
         * sigreturn. */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* If we arrived via sigsuspend, block on top of the suspend
         * mask, not the normal one. */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* Leave vm86 mode before touching the 32-bit x86 stack. */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif

#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* Targets without a legacy frame always use the rt variant. */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        /* SA_RESETHAND: one-shot handler, revert to default. */
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}
922
/* Deliver all deliverable pending guest signals.  Called from the CPU
 * loop whenever signal_pending is set.  Host signals are fully blocked
 * while the pending state is examined, and unblocked (except SEGV/BUS,
 * which the emulator must always catch) before returning. */
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* Block everything so host_signal_handler cannot race with the
         * scan below. */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        /* The synchronous (fault) signal takes priority. */
        sig = ts->sync_signal.pending;
        if (sig) {
            /* A synchronous signal cannot simply stay pending: if it is
             * blocked or ignored, force the default (fatal) action,
             * matching kernel behaviour for forced signals. */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            /* Recompute the effective mask each iteration; delivery may
             * have changed it or entered/left sigsuspend. */
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Delivery can unblock other pending signals — rescan
                 * from the start, including the sync slot. */
                goto restart_scan;
            }
        }

        /* Clear the pending flag *before* unblocking: a signal that
         * arrives after the unblock will set it again and we loop. */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}
984