1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20#include "qemu/bitops.h"
21#include <sys/ucontext.h>
22#include <sys/resource.h>
23
24#include "qemu.h"
25#include "trace.h"
26#include "signal-common.h"
27
/* Guest signal dispositions, indexed by (target signal number - 1). */
static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);
32
33
34
35
36
37
38
39
40
/*
 * Host-to-target mapping for the "classic" (non-realtime) signals.
 * Entries left at 0 receive an identity mapping, and the realtime
 * range is filled in at runtime; both are done by signal_table_init().
 */
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,

    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,

};
79
80static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];
81
82
83int host_to_target_signal(int sig)
84{
85 if (sig < 1 || sig >= _NSIG) {
86 return sig;
87 }
88 return host_to_target_signal_table[sig];
89}
90
91
92int target_to_host_signal(int sig)
93{
94 if (sig < 1 || sig > TARGET_NSIG) {
95 return sig;
96 }
97 return target_to_host_signal_table[sig];
98}
99
100static inline void target_sigaddset(target_sigset_t *set, int signum)
101{
102 signum--;
103 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
104 set->sig[signum / TARGET_NSIG_BPW] |= mask;
105}
106
107static inline int target_sigismember(const target_sigset_t *set, int signum)
108{
109 signum--;
110 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
111 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
112}
113
114void host_to_target_sigset_internal(target_sigset_t *d,
115 const sigset_t *s)
116{
117 int host_sig, target_sig;
118 target_sigemptyset(d);
119 for (host_sig = 1; host_sig < _NSIG; host_sig++) {
120 target_sig = host_to_target_signal(host_sig);
121 if (target_sig < 1 || target_sig > TARGET_NSIG) {
122 continue;
123 }
124 if (sigismember(s, host_sig)) {
125 target_sigaddset(d, target_sig);
126 }
127 }
128}
129
130void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
131{
132 target_sigset_t d1;
133 int i;
134
135 host_to_target_sigset_internal(&d1, s);
136 for(i = 0;i < TARGET_NSIG_WORDS; i++)
137 d->sig[i] = tswapal(d1.sig[i]);
138}
139
140void target_to_host_sigset_internal(sigset_t *d,
141 const target_sigset_t *s)
142{
143 int host_sig, target_sig;
144 sigemptyset(d);
145 for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
146 host_sig = target_to_host_signal(target_sig);
147 if (host_sig < 1 || host_sig >= _NSIG) {
148 continue;
149 }
150 if (target_sigismember(s, target_sig)) {
151 sigaddset(d, host_sig);
152 }
153 }
154}
155
156void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
157{
158 target_sigset_t s1;
159 int i;
160
161 for(i = 0;i < TARGET_NSIG_WORDS; i++)
162 s1.sig[i] = tswapal(s->sig[i]);
163 target_to_host_sigset_internal(d, &s1);
164}
165
166void host_to_target_old_sigset(abi_ulong *old_sigset,
167 const sigset_t *sigset)
168{
169 target_sigset_t d;
170 host_to_target_sigset(&d, sigset);
171 *old_sigset = d.sig[0];
172}
173
174void target_to_host_old_sigset(sigset_t *sigset,
175 const abi_ulong *old_sigset)
176{
177 target_sigset_t d;
178 int i;
179
180 d.sig[0] = *old_sigset;
181 for(i = 1;i < TARGET_NSIG_WORDS; i++)
182 d.sig[i] = 0;
183 target_to_host_sigset(sigset, &d);
184}
185
/*
 * Block all host signals for this thread and atomically mark that a
 * signal is pending.  Returns the previous signal_pending value: if
 * non-zero, a signal arrived before we blocked, and callers treat that
 * as "restart the syscall" (they return -TARGET_ERESTARTSYS).
 */
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /*
     * It is safe to block everything, including SIGSEGV/SIGBUS: the
     * mask is restored (minus those two) by process_pending_signals()
     * before guest code runs again.
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}
200
201
202
203
204
205
206
207
/*
 * Wrapper for sigprocmask on behalf of the guest.  Note that 'set' and
 * 'oldset' are *host* signal sets, not guest ones.  Returns
 * -TARGET_ERESTARTSYS if a signal was already pending when we tried to
 * block delivery (caller must restart the syscall), otherwise 0.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        /* Serialise against signal delivery before mutating the mask. */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to block SIGKILL or SIGSTOP. */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
247
#if !defined(TARGET_NIOS2)
/*
 * Set the guest signal mask directly, with no SIGKILL/SIGSTOP
 * filtering (contrast do_sigprocmask()).
 * NOTE(review): callers are presumably required to have signals
 * blocked already (e.g. via block_signals()) -- confirm at call sites.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif
259
260
261
/*
 * Return non-zero if 'sp' lies within the registered sigaltstack
 * region.  Relies on unsigned wraparound: when sp is below ss_sp the
 * subtraction yields a huge value that fails the comparison.
 */
int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}
269
270int sas_ss_flags(unsigned long sp)
271{
272 TaskState *ts = (TaskState *)thread_cpu->opaque;
273
274 return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
275 : on_sig_stack(sp) ? SS_ONSTACK : 0);
276}
277
/*
 * Pick the stack pointer at which a signal frame should be built: the
 * top of the alternate signal stack if the handler asked for
 * SA_ONSTACK and we are not already running on it, otherwise 'sp'
 * unchanged.
 */
abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}
290
/* Copy the current thread's sigaltstack settings into a guest stack_t. */
void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}
299
300
301
/*
 * Fill in 'tinfo' from a host siginfo_t without byte-swapping (values
 * stay in host order; tswap_siginfo() swaps them later).  The
 * QEMU_SI_* value identifying which _sifields member is valid is
 * packed into bits 16..31 of si_code for tswap_siginfo() to consume.
 */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /*
     * Zero the whole union first so no uninitialised host data can
     * leak into guest memory through the padding.
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /*
     * Decide which _sifields member is valid: the generic SI_* codes
     * determine the layout on their own; every other code is
     * interpreted according to the signal number.
     */
    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent by kill()/tkill() or the kernel: sender pid and uid. */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            /* The child's wait status needs guest encoding too. */
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()-style payload: pid, uid, sigval. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* The pointer value is truncated to guest word width. */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    /* Stash the payload type in the top half of si_code. */
    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
378
/*
 * Byte-swap a target_siginfo_t produced by
 * host_to_target_siginfo_noswap() or queue_signal(), ready for copying
 * to guest memory.  The QEMU_SI_* payload type travels in bits 16..31
 * of si_code; only the low 16 bits (sign-extended) reach the guest.
 */
void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* Swap only the union members that are valid for this si_type. */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}
436
437void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
438{
439 target_siginfo_t tgt_tmp;
440 host_to_target_siginfo_noswap(&tgt_tmp, info);
441 tswap_siginfo(tinfo, &tgt_tmp);
442}
443
444
445
/*
 * Convert a guest-format siginfo to host format.  Only the fields used
 * by POSIX RT signals (sigqueue-style pid, uid and sigval, taken from
 * the _rt layout of the union) are converted.
 */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}
461
462static int fatal_signal (int sig)
463{
464 switch (sig) {
465 case TARGET_SIGCHLD:
466 case TARGET_SIGURG:
467 case TARGET_SIGWINCH:
468
469 return 0;
470 case TARGET_SIGCONT:
471 case TARGET_SIGSTOP:
472 case TARGET_SIGTSTP:
473 case TARGET_SIGTTIN:
474 case TARGET_SIGTTOU:
475
476 return 0;
477 default:
478 return 1;
479 }
480}
481
482
483static int core_dump_signal(int sig)
484{
485 switch (sig) {
486 case TARGET_SIGABRT:
487 case TARGET_SIGFPE:
488 case TARGET_SIGILL:
489 case TARGET_SIGQUIT:
490 case TARGET_SIGSEGV:
491 case TARGET_SIGTRAP:
492 case TARGET_SIGBUS:
493 return (1);
494 default:
495 return (0);
496 }
497}
498
/*
 * Finish building the two signal translation tables:
 *  - map the host realtime range [SIGRTMIN, SIGRTMAX] onto the target
 *    realtime numbers starting at TARGET_SIGRTMIN (host RT signals
 *    beyond TARGET_NSIG stay unmapped);
 *  - give every host signal without an explicit table entry an
 *    identity mapping;
 *  - build the inverse target->host table, with _NSIG marking target
 *    signals that have no host equivalent.
 */
static void signal_table_init(void)
{
    int host_sig, target_sig, count;

    for (host_sig = SIGRTMIN; host_sig <= SIGRTMAX; host_sig++) {
        target_sig = host_sig - SIGRTMIN + TARGET_SIGRTMIN;
        if (target_sig <= TARGET_NSIG) {
            host_to_target_signal_table[host_sig] = target_sig;
        }
    }

    /* Default-initialise the inverse table to "unmapped". */
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        target_to_host_signal_table[target_sig] = _NSIG;
    }
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        if (host_to_target_signal_table[host_sig] == 0) {
            host_to_target_signal_table[host_sig] = host_sig;
        }
        target_sig = host_to_target_signal_table[host_sig];
        if (target_sig <= TARGET_NSIG) {
            target_to_host_signal_table[target_sig] = host_sig;
        }
    }

    /* Trace how many target signals ended up without a host signal. */
    if (trace_event_get_state_backends(TRACE_SIGNAL_TABLE_INIT)) {
        for (target_sig = 1, count = 0; target_sig <= TARGET_NSIG; target_sig++) {
            if (target_to_host_signal_table[target_sig] == _NSIG) {
                count++;
            }
        }
        trace_signal_table_init(count);
    }
}
544
/*
 * One-time signal setup: build the translation tables, capture the
 * inherited host signal mask and dispositions, and install
 * host_signal_handler for every signal whose default action is fatal
 * (others keep their host disposition until the guest calls
 * sigaction).
 */
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i;
    int host_sig;

    signal_table_init();

    /* Remember the signal mask we inherited. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
#ifdef CONFIG_GPROF
        /* Leave SIGPROF alone so host profiling keeps working. */
        if (i == TARGET_SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        /* Seed the guest disposition table from the host's current one. */
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /*
         * Intercept default-fatal signals so their default action
         * (core dump, termination status) can be emulated for the
         * guest instead of killing QEMU directly.
         */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}
585
586
587
588
589
/*
 * Raise target signal 'sig' on the current thread as a synchronous,
 * kernel-originated signal (si_code TARGET_SI_KERNEL, pid/uid 0).
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}
603
604
605
606
607
#if !defined(TARGET_RISCV)
/*
 * Force a SIGSEGV on the current thread, typically because delivering
 * 'oldsig' failed.  If the failing signal was itself SIGSEGV, reset
 * its disposition to default first so delivery cannot recurse through
 * the guest's handler.
 */
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}

#endif
621
622
/*
 * Emulate the guest's default fatal-signal action: optionally write a
 * guest core dump, then kill this process with the corresponding host
 * signal so the parent observes the correct termination status.
 * Never returns.
 */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* Dump a guest core if this signal's default action includes one. */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /*
         * We already dumped the guest core; zero RLIMIT_CORE so the
         * host does not write its own core file on top of it.
         */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur=0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /*
     * Restore the default host disposition and re-raise the signal so
     * the process terminates with the expected wait status.
     */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    kill(getpid(), host_sig);

    /* The signal may currently be blocked: wait for it explicitly. */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* Should never get here; make termination certain. */
    abort();
}
675
676
677
/*
 * Record a synchronous signal (one raised by this vCPU's own
 * execution) for delivery by process_pending_signals().  si_type is a
 * QEMU_SI_* value and is packed into the top 16 bits of si_code, the
 * same encoding host_to_target_siginfo_noswap() uses.  Always
 * returns 1.
 */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    /* There is a single slot: a newer synchronous signal overwrites it. */
    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* Tell the main loop a signal is waiting. */
    atomic_set(&ts->signal_pending, 1);
    return 1;
}
694
#ifndef HAVE_SAFE_SYSCALL
/* No-op stub for hosts without a safe-syscall assembly fragment. */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif
701
/*
 * Handler installed for every intercepted host signal.  Either the
 * signal is resolved as a guest memory access fault
 * (cpu_signal_handler), or it is recorded in the per-thread pending
 * table for delivery at the next safe point.
 */
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /*
     * A SIGSEGV/SIGBUS with positive si_code may be a fault on guest
     * memory; give the CPU-specific handler a chance to fix it up and
     * resume.
     */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* Drop host signals that have no target equivalent. */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    /*
     * If we were inside a blocking safe-syscall, rewind the PC so the
     * syscall is restarted/aborted consistently.
     */
    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /*
     * Block all signals when this handler returns, except SIGSEGV and
     * SIGBUS, which must remain deliverable for guest memory faults.
     * The guest mask is restored by process_pending_signals().
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* Kick the vCPU out of its execution loop so the signal is seen. */
    cpu_exit(thread_cpu);
}
757
758
759
/*
 * Emulate sigaltstack(2) for the guest.  uss_addr/uoss_addr are guest
 * addresses (0 means NULL); sp is the current guest stack pointer,
 * used to refuse changes while running on the old alternate stack.
 * Returns 0 or a negative TARGET_* errno.
 */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    /* Snapshot the old settings before they can be overwritten below. */
    if(uoss_addr)
    {
        __put_user(ts->sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(ts->sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* PPC64 ABIs newer than v1 use a 4 KiB minimum stack size. */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        /* Cannot change the alternate stack while executing on it. */
        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            /* New stack must meet the minimum size. */
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        ts->sigaltstack_used.ss_sp = ss.ss_sp;
        ts->sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}
831
832
/*
 * Emulate sigaction(2): update the guest disposition table and install
 * or remove the matching host handler.  'act'/'oact' are in guest
 * byte order.  Returns 0, a negative TARGET_* errno, or the result of
 * the host sigaction() call.
 */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    /* SIGKILL and SIGSTOP dispositions cannot be changed. */
    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    /* Serialise against concurrent signal delivery in this thread. */
    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped: sa_mask is stored in guest format throughout. */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        k->sa_mask = act->sa_mask;

        /* Propagate the new disposition to the host signal. */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /*
             * Target signal with no corresponding host signal: report
             * success but the signal can never actually be delivered.
             */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /*
             * Keep host_signal_handler installed for SIG_DFL signals
             * whose default action is fatal, so that action can be
             * emulated (dump_core_and_abort) rather than killing QEMU.
             */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
913
/*
 * Deliver one pending signal to the guest: either emulate the default
 * action (stop, ignore, or die) or build a guest signal frame so the
 * guest handler runs when the vCPU resumes.
 */
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* The signal stops being pending once we commit to delivering it. */
    k->pending = 0;

    /* An attached gdb may suppress or replace the signal. */
    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* Default actions: job-control stop, ignore, or terminate. */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(),SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* Compute the set of signals blocked while the handler runs. */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER means the signal itself is not blocked in its handler. */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* Save the old mask so the sigreturn path can restore it. */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /*
         * Extend the pre-handler mask (the sigsuspend mask if we were
         * inside sigsuspend) with the handler's blocked set.
         */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* Leave v86 mode before building a signal frame on i386. */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif

        /* Build the guest signal frame (rt frame when SA_SIGINFO). */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* Targets without a non-rt frame always use the rt variant. */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            /* SA_RESETHAND: back to default after one delivery. */
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}
999
/*
 * Deliver every deliverable pending signal for the current thread.
 * The body runs with all host signals blocked; on the way out the
 * guest signal mask (minus SIGSEGV/SIGBUS, which must stay unblocked
 * for guest memory faults) is applied to the host.
 */
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* Block everything while we walk the pending tables. */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        /* Synchronous signals (raised by the vCPU itself) go first. */
        sig = ts->sync_signal.pending;
        if (sig) {
            /*
             * Synchronous signals are forced: if the signal is blocked
             * or ignored, revert to SIG_DFL and unblock it so the
             * default action is taken.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /*
                 * Delivering a signal changes the mask and may queue a
                 * new synchronous signal, so rescan from the top.
                 */
                goto restart_scan;
            }
        }

        /*
         * Clear the pending flag before unblocking: a signal arriving
         * after this point sets it again and the while loop repeats.
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}
1061