/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "exec/gdbstub.h"
#include "hw/core/tcg-cpu-ops.h"

#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "loader.h"
#include "trace.h"
#include "signal-common.h"
#include "host-signal.h"
#include "user/safe-syscall.h"

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/* Fallback addresses into sigtramp page. */
abi_ulong default_sigreturn;
abi_ulong default_rt_sigreturn;

/*
 * System includes define _NSIG as SIGRTMAX + 1, but QEMU (like the
 * kernel) defines TARGET_NSIG as TARGET_SIGRTMAX, and the first signal
 * is SIGHUP, defined as 1.  Signal number 0 is reserved for use as
 * kill(pid, 0), to test whether a process exists without sending it
 * a signal.
 */
#ifdef __SIGRTMAX
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
#endif
static uint8_t host_to_target_signal_table[_NSIG] = {
#define MAKE_SIG_ENTRY(sig)     [sig] = TARGET_##sig,
    MAKE_SIGNAL_LIST
#undef MAKE_SIG_ENTRY
};

static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];

/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
    if (sig < 1 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
    if (sig < 1 || sig > TARGET_NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}

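/*
 * Illustration (added, not in the original source): with the tables
 * built by signal_table_init() below, non-realtime signals map
 * identically, so host_to_target_signal(SIGINT) == TARGET_SIGINT,
 * while host realtime signals are rebased, e.g. on a glibc host where
 * SIGRTMIN is 34, host signal 34 maps to TARGET_SIGRTMIN.
 * Out-of-range values pass through unchanged so callers can detect them.
 */
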
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}
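
/*
 * Worked example (added for clarity): signal numbers are 1-based but
 * the bitmap is 0-based, hence the signum-- above.  TARGET_SIGHUP (1)
 * lives in bit 0 of sig[0]; signal TARGET_NSIG_BPW + 1 lives in bit 0
 * of sig[1].
 */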

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;
    target_sigemptyset(d);
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;
    sigemptyset(d);
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /*
     * It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}
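
/*
 * Typical caller pattern (illustrative, mirroring do_sigprocmask() and
 * do_sigaction() below): a non-zero return means a signal arrived
 * first and the guest syscall must be restarted.
 *
 *     if (block_signals()) {
 *         return -QEMU_ERESTARTSYS;
 *     }
 */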

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and
 * oldset are host signal sets, not guest ones. Returns -QEMU_ERESTARTSYS
 * if a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}
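
/*
 * Note (added): on targets whose stack grows downwards (which is the
 * common case for the architectures using this helper), the usable top
 * of the alternate stack is ss_sp + ss_size; the per-architecture
 * frame-setup code is expected to align the value it gets back before
 * building the signal frame on it.
 */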

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */
    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            if (si_code == CLD_EXITED) {
                tinfo->_sifields._sigchld._status = info->si_status;
            } else {
                tinfo->_sifields._sigchld._status
                    = host_to_target_signal(info->si_status & 0x7f)
                      | (info->si_status & ~0x7f);
            }
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
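
/*
 * Worked example of the encoding above (added for clarity): for a
 * signal sent with kill(), si_code is SI_USER (0) and si_type is
 * QEMU_SI_KILL, so deposit32(0, 16, 16, QEMU_SI_KILL) stores
 * QEMU_SI_KILL in bits 16..31 while bits 0..15 keep the kernel
 * si_code.  tswap_siginfo() undoes this with extract32() for the type
 * and sextract32() for the (signed) si_code.
 */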

void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

static void signal_table_init(void)
{
    int host_sig, target_sig, count;

    /*
     * Signals are supported starting from TARGET_SIGRTMIN and going up
     * until we run out of host realtime signals.  glibc reserves the
     * lowest realtime signals for internal use, which is why SIGRTMIN
     * (34) is generally greater than __SIGRTMIN (32); target signals
     * beyond the host range therefore cannot be mapped, and attempts to
     * configure such "missing" signals via sigaction will be silently
     * ignored.
     */
    for (host_sig = SIGRTMIN; host_sig <= SIGRTMAX; host_sig++) {
        target_sig = host_sig - SIGRTMIN + TARGET_SIGRTMIN;
        if (target_sig <= TARGET_NSIG) {
            host_to_target_signal_table[host_sig] = target_sig;
        }
    }

    /* generate signal conversion tables */
    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        target_to_host_signal_table[target_sig] = _NSIG; /* poison: unmapped */
    }
    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        if (host_to_target_signal_table[host_sig] == 0) {
            host_to_target_signal_table[host_sig] = host_sig;
        }
        target_sig = host_to_target_signal_table[host_sig];
        if (target_sig <= TARGET_NSIG) {
            target_to_host_signal_table[target_sig] = host_sig;
        }
    }

    if (trace_event_get_state_backends(TRACE_SIGNAL_TABLE_INIT)) {
        for (target_sig = 1, count = 0; target_sig <= TARGET_NSIG; target_sig++) {
            if (target_to_host_signal_table[target_sig] == _NSIG) {
                count++;
            }
        }
        trace_signal_table_init(count);
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i;
    int host_sig;

    /* initialize signal conversion tables */
    signal_table_init();

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
#ifdef CONFIG_GPROF
        if (i == TARGET_SIGPROF) {
            continue;
        }
#endif
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case.  */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/*
 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
 * 'force' part is handled in process_pending_signals().
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = code;
    info._sifields._sigfault._addr = addr;
    queue_signal(env, sig, QEMU_SI_FAULT, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
    }

    force_sig_fault(TARGET_SIGSEGV,
                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
                    addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                          MMUAccessType access_type, uintptr_t ra)
{
    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;

    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
    }

    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

/* abort execution with signal */
static G_NORETURN
void dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_dump_core_and_abort(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourself a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (reuse the mask inside
     * of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
void queue_signal(CPUArchState *env, int sig, int si_type,
                  target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
}
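
/*
 * Delivery path (summary added for orientation): the main cpu loop
 * notices ts->signal_pending, calls process_pending_signals(), which
 * picks this sync_signal entry up and hands it to
 * handle_pending_signal() to build the guest signal frame.
 */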

/* Adjust the signal context to rewind out of safe-syscall if we're in it */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    host_sigcontext *uc = (host_sigcontext *)puc;
    uintptr_t pcreg = host_signal_pc(uc);

    if (pcreg > (uintptr_t)safe_syscall_start
        && pcreg < (uintptr_t)safe_syscall_end) {
        host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
    }
}
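
/*
 * Why rewinding works (note added): the safe-syscall code re-checks
 * signal_pending before the syscall instruction commits, so moving the
 * interrupted PC back to safe_syscall_start makes that check run again;
 * it now sees the pending signal and the syscall is abandoned with
 * -QEMU_ERESTARTSYS instead of racing with signal delivery.
 */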

static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = env_cpu(env);
    TaskState *ts = cpu->opaque;
    target_siginfo_t tinfo;
    host_sigcontext *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;
    void *sigmask = host_signal_mask(uc);

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding.
     */
    if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) {
        MMUAccessType access_type;
        uintptr_t host_addr;
        abi_ptr guest_addr;
        bool is_write;

        host_addr = (uintptr_t)info->si_addr;

        /*
         * Convert forcefully to guest address space: addresses outside
         * reserved_va are still valid to report via SEGV_MAPERR.
         */
        guest_addr = h2g_nocheck(host_addr);

        pc = host_signal_pc(uc);
        is_write = host_signal_write(info, uc);
        access_type = adjust_signal_pc(&pc, is_write);

        if (host_sig == SIGSEGV) {
            bool maperr = true;

            if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
                /* If this was a write to a TB protected page, restart. */
                if (is_write &&
                    handle_sigsegv_accerr_write(cpu, sigmask, pc, guest_addr)) {
                    return;
                }

                /*
                 * With reserved_va, the whole address space is PROT_NONE,
                 * which means that we may get ACCERR when we want MAPERR.
                 */
                if (page_get_flags(guest_addr) & PAGE_VALID) {
                    maperr = false;
                } else {
                    info->si_code = SEGV_MAPERR;
                }
            }

            sigprocmask(SIG_SETMASK, sigmask, NULL);
            cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
        } else {
            sigprocmask(SIG_SETMASK, sigmask, NULL);
            if (info->si_code == BUS_ADRALN) {
                cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
            }
        }

        sync_sig = true;
    }

    /* get target signal number */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until the target signal handler is entered.
     * We can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the sigmask
     * stored in the ucontext is the kernel's sigset_t, which can be
     * smaller than the libc sigset_t that sigfillset() operates on;
     * fill exactly SIGSET_T_SIZE bytes instead.
     */
    memset(sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(sigmask, SIGSEGV);
    sigdelset(sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        ret = target_restore_altstack(uss, env);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

 out:
    if (uoss) {
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}

/* do_sigaction() return target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -QEMU_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map this target signal */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * we don't return an error here because some programs try to
             * register a handler for all possible rt signals even if they
             * don't need it.
             * An error here can abort them whereas there can be no problem
             * in not having the signal available later.
             * This is the case for golang,
             *   See https://github.com/golang/go/issues/33746
             * So we silently ignore the error.
             */
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* Note: It is important to update the host kernel signal
             * ignore state to avoid getting unexpected interrupted
             * syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
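
/*
 * Dispatch note (added): guest handlers are never installed directly
 * on the host.  The host either gets host_signal_handler() (which
 * queues the signal for the guest) or SIG_IGN/SIG_DFL, and SIGSEGV
 * and SIGBUS are left alone entirely because the emulator itself
 * relies on them; the guest's chosen disposition is applied later in
 * handle_pending_signal().
 */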

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler : ignore some signals. The others are job
           control or fatal. */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN ||
            sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}

int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
                            target_ulong sigsize)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t *host_set = &ts->sigsuspend_mask;
    target_sigset_t *target_sigset;

    if (sigsize != sizeof(*target_sigset)) {
        /* Like the kernel, we enforce correct size sigsets */
        return -TARGET_EINVAL;
    }

    target_sigset = lock_user(VERIFY_READ, sigset, sigsize, 1);
    if (!target_sigset) {
        return -TARGET_EFAULT;
    }
    target_to_host_sigset(host_set, target_sigset);
    unlock_user(target_sigset, sigset, 0);

    *pset = host_set;
    return 0;
}