1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20#include "qemu/bitops.h"
21#include <sys/ucontext.h>
22#include <sys/resource.h>
23
24#include "qemu.h"
25#include "trace.h"
26#include "signal-common.h"
27
/* Emulated guest signal dispositions, indexed by target signal number - 1. */
static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);
32
/*
 * Map from host signal number to target signal number.
 * Entries left at zero are completed as the identity mapping by
 * signal_init().  Note the deliberate swap of __SIGRTMIN and
 * __SIGRTMAX: the lowest host realtime signals are reserved by the
 * host C library/libpthread, so the ends of the RT range are
 * exchanged to keep guest RT signals clear of them.
 */
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* Reverse the RT range ends to avoid host libpthread's signals. */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
/* Inverse of the table above; built by signal_init(). */
static uint8_t target_to_host_signal_table[_NSIG];
77
78int host_to_target_signal(int sig)
79{
80 if (sig < 0 || sig >= _NSIG)
81 return sig;
82 return host_to_target_signal_table[sig];
83}
84
85int target_to_host_signal(int sig)
86{
87 if (sig < 0 || sig >= _NSIG)
88 return sig;
89 return target_to_host_signal_table[sig];
90}
91
92static inline void target_sigaddset(target_sigset_t *set, int signum)
93{
94 signum--;
95 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
96 set->sig[signum / TARGET_NSIG_BPW] |= mask;
97}
98
99static inline int target_sigismember(const target_sigset_t *set, int signum)
100{
101 signum--;
102 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
103 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
104}
105
106void host_to_target_sigset_internal(target_sigset_t *d,
107 const sigset_t *s)
108{
109 int i;
110 target_sigemptyset(d);
111 for (i = 1; i <= TARGET_NSIG; i++) {
112 if (sigismember(s, i)) {
113 target_sigaddset(d, host_to_target_signal(i));
114 }
115 }
116}
117
118void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
119{
120 target_sigset_t d1;
121 int i;
122
123 host_to_target_sigset_internal(&d1, s);
124 for(i = 0;i < TARGET_NSIG_WORDS; i++)
125 d->sig[i] = tswapal(d1.sig[i]);
126}
127
128void target_to_host_sigset_internal(sigset_t *d,
129 const target_sigset_t *s)
130{
131 int i;
132 sigemptyset(d);
133 for (i = 1; i <= TARGET_NSIG; i++) {
134 if (target_sigismember(s, i)) {
135 sigaddset(d, target_to_host_signal(i));
136 }
137 }
138}
139
140void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
141{
142 target_sigset_t s1;
143 int i;
144
145 for(i = 0;i < TARGET_NSIG_WORDS; i++)
146 s1.sig[i] = tswapal(s->sig[i]);
147 target_to_host_sigset_internal(d, &s1);
148}
149
150void host_to_target_old_sigset(abi_ulong *old_sigset,
151 const sigset_t *sigset)
152{
153 target_sigset_t d;
154 host_to_target_sigset(&d, sigset);
155 *old_sigset = d.sig[0];
156}
157
158void target_to_host_old_sigset(sigset_t *sigset,
159 const abi_ulong *old_sigset)
160{
161 target_sigset_t d;
162 int i;
163
164 d.sig[0] = *old_sigset;
165 for(i = 1;i < TARGET_NSIG_WORDS; i++)
166 d.sig[i] = 0;
167 target_to_host_sigset(sigset, &d);
168}
169
/* Block all host signals for this thread and flag that guest signal
 * processing is required.  Returns the previous value of
 * ts->signal_pending: non-zero means a signal arrived in the meantime,
 * and callers (see do_sigprocmask, do_sigaction) respond by returning
 * -TARGET_ERESTARTSYS so the syscall is restarted after the signal is
 * handled.
 */
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It is OK to block everything, including SIGSEGV and SIGBUS,
     * because no further guest code runs before signals are unblocked
     * again in process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}
184
185
186
187
188
189
190
191
/* Wrapper for sigprocmask that operates on the guest's emulated signal
 * mask.  Note that 'set' and 'oldset' are HOST signal sets, not guest
 * ones.  Returns 0 on success, or -TARGET_ERESTARTSYS if a signal was
 * already pending and the syscall must be restarted.
 * If 'set' is NULL this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        /* Take the "lock" (block all host signals) before mutating the
         * emulated mask, so the update cannot race a host signal.
         */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to block SIGKILL or SIGSTOP,
         * matching kernel behaviour.
         */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
231
#if !defined(TARGET_NIOS2)
/* Set the guest's emulated signal mask to the given HOST signal set.
 * The caller is assumed to have already called block_signals(), so no
 * locking is done here.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif
243
244
245
/* sigaltstack management: return non-zero if 'sp' lies within the
 * currently configured alternate signal stack.  Relies on unsigned
 * wraparound: when sp < ss_sp the subtraction wraps to a huge value,
 * so a single comparison checks both bounds.
 */
int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}
253
254int sas_ss_flags(unsigned long sp)
255{
256 TaskState *ts = (TaskState *)thread_cpu->opaque;
257
258 return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
259 : on_sig_stack(sp) ? SS_ONSTACK : 0);
260}
261
/* Choose the stack pointer for a new signal frame.  This is the
 * X/Open sanctioned signal stack switching: use the top of the
 * alternate stack when the action requested SA_ONSTACK and we are not
 * already running on it; otherwise keep the current sp.
 */
abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}
274
/* Copy the thread's current sigaltstack configuration into the guest
 * stack_t 'uss' (used when building signal frames).
 */
void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}
283
284
285
286static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
287 const siginfo_t *info)
288{
289 int sig = host_to_target_signal(info->si_signo);
290 int si_code = info->si_code;
291 int si_type;
292 tinfo->si_signo = sig;
293 tinfo->si_errno = 0;
294 tinfo->si_code = info->si_code;
295
296
297
298
299
300
301
302 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319 switch (si_code) {
320 case SI_USER:
321 case SI_TKILL:
322 case SI_KERNEL:
323
324
325
326 tinfo->_sifields._kill._pid = info->si_pid;
327 tinfo->_sifields._kill._uid = info->si_uid;
328 si_type = QEMU_SI_KILL;
329 break;
330 default:
331
332 switch (sig) {
333 case TARGET_SIGCHLD:
334 tinfo->_sifields._sigchld._pid = info->si_pid;
335 tinfo->_sifields._sigchld._uid = info->si_uid;
336 tinfo->_sifields._sigchld._status
337 = host_to_target_waitstatus(info->si_status);
338 tinfo->_sifields._sigchld._utime = info->si_utime;
339 tinfo->_sifields._sigchld._stime = info->si_stime;
340 si_type = QEMU_SI_CHLD;
341 break;
342 case TARGET_SIGIO:
343 tinfo->_sifields._sigpoll._band = info->si_band;
344 tinfo->_sifields._sigpoll._fd = info->si_fd;
345 si_type = QEMU_SI_POLL;
346 break;
347 default:
348
349 tinfo->_sifields._rt._pid = info->si_pid;
350 tinfo->_sifields._rt._uid = info->si_uid;
351
352 tinfo->_sifields._rt._sigval.sival_ptr
353 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
354 si_type = QEMU_SI_RT;
355 break;
356 }
357 break;
358 }
359
360 tinfo->si_code = deposit32(si_code, 16, 16, si_type);
361}
362
/* Byte-swap a target_siginfo_t previously filled by
 * host_to_target_siginfo_noswap(), producing guest byte order.  The
 * internal si_type marker in the top 16 bits of si_code selects which
 * union member is valid; it is stripped (and the low 16 bits
 * sign-extended) before si_code is written for the guest.
 */
void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* Use our internal marker of which fields are valid rather than
     * repeating the guesswork of host_to_target_siginfo_noswap().
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}
420
421void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
422{
423 target_siginfo_t tgt_tmp;
424 host_to_target_siginfo_noswap(&tgt_tmp, info);
425 tswap_siginfo(tinfo, &tgt_tmp);
426}
427
428
429
/* XXX: we support only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * so we know the _rt union fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}
445
446static int fatal_signal (int sig)
447{
448 switch (sig) {
449 case TARGET_SIGCHLD:
450 case TARGET_SIGURG:
451 case TARGET_SIGWINCH:
452
453 return 0;
454 case TARGET_SIGCONT:
455 case TARGET_SIGSTOP:
456 case TARGET_SIGTSTP:
457 case TARGET_SIGTTIN:
458 case TARGET_SIGTTOU:
459
460 return 0;
461 default:
462 return 1;
463 }
464}
465
466
467static int core_dump_signal(int sig)
468{
469 switch (sig) {
470 case TARGET_SIGABRT:
471 case TARGET_SIGFPE:
472 case TARGET_SIGILL:
473 case TARGET_SIGQUIT:
474 case TARGET_SIGSEGV:
475 case TARGET_SIGTRAP:
476 case TARGET_SIGBUS:
477 return (1);
478 default:
479 return (0);
480 }
481}
482
/* One-time signal subsystem initialisation: build the signal number
 * conversion tables, capture the host signal mask, and install our
 * host_signal_handler for every default-fatal signal.
 */
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* Generate the signal conversion tables: unmapped entries become
     * the identity, then invert to get target -> host.
     */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Seed the emulated signal mask from the current host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* Set all host signal handlers.  ALL signals are blocked during
     * the handlers to serialize them.
     */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
#ifdef TARGET_GPROF
        if (i == SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        /* Inherit SIG_IGN/SIG_DFL dispositions from the host into the
         * emulated table; an already-installed handler is left alone.
         */
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* Install handlers only for default-fatal signals; trapping
         * every signal would change syscall interrupt behaviour.  We
         * need at least SIGSEGV and SIGBUS to detect exceptions.
         */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}
534
535
536
537
538
/* Force a synchronously taken signal.  The kernel's force_sig() also
 * forces the signal to "not blocked, not ignored"; for QEMU that part
 * of the work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}
552
553
554
555
556
#if !defined(TARGET_RISCV)
/* Force a SIGSEGV because we could not write guest memory while trying
 * to set up a signal frame.  'oldsig' is the signal we were trying to
 * deliver at the point of failure.
 */
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this
         * will end up with handle_pending_signal() calling
         * dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}

#endif
570
571
/* Abort execution with a signal: optionally dump a guest core file,
 * then die from the (uncaught) host signal so the parent observes the
 * correct wait status.
 */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* Dump core if supported by target binary format. */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* We already dumped the core of the target process; prevent
         * the kernel from also producing a core of qemu itself.
         */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur=0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourself a signal and we wait for
     * it to arrive.
     */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* kill() rather than raise(): raise(host_sig) does not deliver the
     * signal when statically linked on x86-64.
     */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (reusing the mask inside act)
     * and wait for it to arrive.
     */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
624
625
626
/* Queue a synchronous signal so that it will be raised when the cpu
 * loop exits.  'si_type' identifies which siginfo union member is
 * valid and is stashed in the top 16 bits of si_code for
 * tswap_siginfo() to consume.
 */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* Flag that process_pending_signals() has work to do. */
    atomic_set(&ts->signal_pending, 1);
    return 1;
}
643
#ifndef HAVE_SAFE_SYSCALL
/* Fallback when the target has no safe-syscall support: never rewind. */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif
650
/* Host-side signal handler installed by signal_init()/do_sigaction().
 * Records the signal in the per-thread pending table and arranges for
 * the cpu loop to exit so the guest signal can be delivered.
 */
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* The CPU emulator uses some host signals to detect exceptions;
     * give cpu_signal_handler() first refusal on kernel-generated
     * (si_code > 0) SIGSEGV/SIGBUS.
     */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* Get the target signal number; drop signals outside the range the
     * guest can see.
     */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until the pending signal is processed, by
     * editing the mask that will be restored when this handler
     * returns.  SIGSEGV and SIGBUS are left unblocked because guest
     * code may provoke one before we reach the main loop; everything
     * is unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() on uc->uc_sigmask because it
     * is a kernel sigset_t, which is smaller than the libc sigset_t
     * that sigfillset() operates on; sigfillset() would write past the
     * end of the field and trash other data on the stack.  Hence the
     * memset with the kernel SIGSET_T_SIZE.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* Interrupt the virtual CPU as soon as possible. */
    cpu_exit(thread_cpu);
}
706
707
708
/* Emulate the sigaltstack syscall.  Returns 0 or a -TARGET_* errno.
 * 'uss_addr'/'uoss_addr' are guest addresses (0 means NULL); 'sp' is
 * the guest stack pointer, used to detect "currently on the stack".
 */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    /* Snapshot the old state FIRST so a self-referential call still
     * returns the previous configuration.
     */
    if(uoss_addr)
    {
        __put_user(ts->sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(ts->sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        /* Cannot change the alternate stack while executing on it. */
        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        ts->sigaltstack_used.ss_sp = ss.ss_sp;
        ts->sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}
780
781
/* Emulate the sigaction syscall: update the emulated disposition table
 * and, where needed, the real host handler.  Returns target errnos.
 */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    /* Serialize against host signals while we touch sigact_table. */
    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* sa_mask is stored in target layout; copied as-is. */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* Kept in target layout; converted when the signal is taken. */
        k->sa_mask = act->sa_mask;

        /* Update the host kernel signal state: important so that
         * ignored signals do not cause unexpected interrupted
         * syscalls.  SIGSEGV/SIGBUS keep our handler unconditionally
         * (needed for exception detection).
         */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* SIG_IGN can be passed straight through to the host;
             * SIG_DFL only for non-fatal signals (fatal ones must keep
             * our handler so we can report them correctly).
             */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
843
/* Deliver one pending guest signal: apply the default action, ignore
 * it, or build a signal frame for the guest handler.  Called from
 * process_pending_signals() with all host signals blocked.
 */
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* Dequeue the signal. */
    k->pending = 0;

    /* gdb may consume the signal (returns 0) or replace it. */
    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* Default handler: job-control signals stop the process,
         * a few are ignored, the rest are fatal.
         */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(),SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* Compute the set of signals blocked during handler execution. */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER means the signal itself is NOT blocked while its
         * handler runs.
         */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* Save the previous blocked-signal state so the sigreturn
         * frame can restore it.
         */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* Block signals in the handler; a pending sigsuspend mask is
         * the base if we are inside sigsuspend.
         */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* If the CPU is in VM86 mode, flush the 32-bit state first. */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* Prepare the signal frame on the virtual CPU's stack. */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets only have rt-style signal frames. */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}
929
/* Deliver all deliverable pending guest signals for this thread.
 * Called from the cpu loop; loops until no signal arrives while we
 * work, then restores the (mostly) unblocked host signal mask.
 */
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* Block all host signals while we manipulate the queues. */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced: if the signal is blocked
             * or ignored the process could otherwise loop on the same
             * fault forever, so unblock it and reset the disposition
             * to default before delivery (mirrors the kernel's
             * force_sig_info()).
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Delivering a signal may have queued a new synchronous
                 * signal (e.g. a SIGSEGV while building the frame), so
                 * rescan from the top.
                 */
                goto restart_scan;
            }
        }

        /* Unblock signals and recheck: the act of unblocking may cause
         * us to take another host signal, which will set
         * signal_pending again.  SIGSEGV and SIGBUS stay unblocked at
         * the host level so exceptions in guest code are caught.
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}
991