/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"

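/*
 * SLAB caches for signal bits.
 */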
static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals, except for
	 * SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return 0;

	return sig_task_ignored(t, sig, force);
}

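/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */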
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous if there is already a pending signal, but the wakeup
 * is kept unconditional for simplicity.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);

}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

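/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask, which must be a subset of %JOBCTL_PENDING_MASK |
 * %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK | %JOBCTL_TRAPPING, in
 * @task->jobctl.  If a stop signo is being set, any existing signo is
 * cleared first.  If @task is already dying, this function becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */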
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

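/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If %JOBCTL_TRAPPING is set, a ptracer is waiting for @task to enter
 * TRACED.  Clear the bit and wake up the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */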
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

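/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */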
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

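/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */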
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

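/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */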
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

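/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */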
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * against the strict POSIX specification.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);
	}
#endif
	return signr;
}

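/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 */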
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

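/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */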
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

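/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */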
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

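/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */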
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

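/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */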
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
		return 0;
	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

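/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */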
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */
int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

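/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */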
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queue just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}

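/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */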
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

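/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */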
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

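/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */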
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that a SIGCONT may have been
	 * delivered while the siglock was released, so a stop isn't
	 * necessarily still in effect; the bookkeeping here simply
	 * participates if a group stop is in effect.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}

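/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * places afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */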
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  The condition can be
		 * distinguished by testing whether SIGNAL_STOP_STOPPED is
		 * already set.  Don't generate group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}

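/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */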
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}

static int ptrace_signal(int signr, siginfo_t *info)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}

int get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	if (unlikely(uprobe_deny_signal()))
		return 0;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		signr = dequeue_signal(current, &current->blocked, &ksig->info);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * them all.  So lets let it do the coredump stuff
			 * and then, if we go back, kill all the rest
			 * of the group members without signals.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}

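/**
 * signal_delivered - called after a signal has been delivered
 * @ksig:		kernel signal struct
 * @stepping:		nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
 */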
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn.  So we can
	   simply clear the restore sigmask flag.  */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	tracehook_signal_handler(stepping);
}

void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
	if (failed)
		force_sigsegv(ksig->sig, current);
	else
		signal_delivered(ksig, stepping);
}

/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}

void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);

/*
 * System call entry points.
 */

/**
 *  sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}

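/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */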
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}

void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by other task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}

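/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */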
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}

/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: stores pending signals
 *  @oset: previous value of signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		sigset_t new_set;
		int error;
		if (get_compat_sigset(&new_set, nset))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
}
#endif

static int do_sigpending(sigset_t *set)
{
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(set, &current->blocked, set);
	return 0;
}

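/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @uset: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */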
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;
	int err;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	err = do_sigpending(&set);
	if (!err && copy_to_user(uset, &set, sigsetsize))
		err = -EFAULT;
	return err;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
	sigset_t set;
	int err;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	err = do_sigpending(&set);
	if (!err)
		err = put_compat_sigset(uset, &set, sigsetsize);
	return err;
}
#endif

enum siginfo_layout siginfo_layout(int sig, int si_code)
{
	enum siginfo_layout layout = SIL_KILL;
	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
		static const struct {
			unsigned char limit, layout;
		} filter[] = {
			[SIGILL]  = { NSIGILL,  SIL_FAULT },
			[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
			[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
			[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
			[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
#if defined(SIGEMT) && defined(NSIGEMT)
			[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
#endif
			[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
			[SIGPOLL] = { NSIGPOLL, SIL_POLL },
#ifdef __ARCH_SIGSYS
			[SIGSYS]  = { NSIGSYS,  SIL_SYS },
#endif
		};
		if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit))
			layout = filter[sig].layout;
		else if (si_code <= NSIGPOLL)
			layout = SIL_POLL;
	} else {
		if (si_code == SI_TIMER)
			layout = SIL_TIMER;
		else if (si_code == SI_SIGIO)
			layout = SIL_POLL;
		else if (si_code < 0)
			layout = SIL_RT;
		/* Tests to support buggy kernel ABIs */
#ifdef TRAP_FIXME
		if ((sig == SIGTRAP) && (si_code == TRAP_FIXME))
			layout = SIL_FAULT;
#endif
#ifdef FPE_FIXME
		if ((sig == SIGFPE) && (si_code == FPE_FIXME))
			layout = SIL_FAULT;
#endif
	}
	return layout;
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user(from->si_code, &to->si_code);
	switch (siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case SIL_TIMER:
		/* Unreached, SI_TIMER is negative */
		break;
	case SIL_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case SIL_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_signo == SIGBUS &&
		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
#ifdef SEGV_BNDERR
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
			err |= __put_user(from->si_lower, &to->si_lower);
			err |= __put_user(from->si_upper, &to->si_upper);
		}
#endif
#ifdef SEGV_PKUERR
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
			err |= __put_user(from->si_pkey, &to->si_pkey);
#endif
		break;
	case SIL_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case SIL_RT:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
#ifdef __ARCH_SIGSYS
	case SIL_SYS:
		err |= __put_user(from->si_call_addr, &to->si_call_addr);
		err |= __put_user(from->si_syscall, &to->si_syscall);
		err |= __put_user(from->si_arch, &to->si_arch);
		break;
#endif
	}
	return err;
}

#endif

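/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 */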
2810static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2811 const struct timespec *ts)
2812{
2813 ktime_t *to = NULL, timeout = KTIME_MAX;
2814 struct task_struct *tsk = current;
2815 sigset_t mask = *which;
2816 int sig, ret = 0;
2817
2818 if (ts) {
2819 if (!timespec_valid(ts))
2820 return -EINVAL;
2821 timeout = timespec_to_ktime(*ts);
2822 to = &timeout;
2823 }
2824
2825
2826
2827
2828 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2829 signotset(&mask);
2830
2831 spin_lock_irq(&tsk->sighand->siglock);
2832 sig = dequeue_signal(tsk, &mask, info);
2833 if (!sig && timeout) {
2834
2835
2836
2837
2838
2839
2840 tsk->real_blocked = tsk->blocked;
2841 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2842 recalc_sigpending();
2843 spin_unlock_irq(&tsk->sighand->siglock);
2844
2845 __set_current_state(TASK_INTERRUPTIBLE);
2846 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
2847 HRTIMER_MODE_REL);
2848 spin_lock_irq(&tsk->sighand->siglock);
2849 __set_task_blocked(tsk, &tsk->real_blocked);
2850 sigemptyset(&tsk->real_blocked);
2851 sig = dequeue_signal(tsk, &mask, info);
2852 }
2853 spin_unlock_irq(&tsk->sighand->siglock);
2854
2855 if (sig)
2856 return sig;
2857 return ret ? -EINTR : -EAGAIN;
2858}
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2869 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2870 size_t, sigsetsize)
2871{
2872 sigset_t these;
2873 struct timespec ts;
2874 siginfo_t info;
2875 int ret;
2876
2877
2878 if (sigsetsize != sizeof(sigset_t))
2879 return -EINVAL;
2880
2881 if (copy_from_user(&these, uthese, sizeof(these)))
2882 return -EFAULT;
2883
2884 if (uts) {
2885 if (copy_from_user(&ts, uts, sizeof(ts)))
2886 return -EFAULT;
2887 }
2888
2889 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2890
2891 if (ret > 0 && uinfo) {
2892 if (copy_siginfo_to_user(uinfo, &info))
2893 ret = -EFAULT;
2894 }
2895
2896 return ret;
2897}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec t;
	siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (compat_get_timespec(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif

/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return kill_something_info(sig, &info, pid);
}
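
/*
 * Illustrative example (userspace, compiled out -- not part of this file):
 * kill(2) with signal 0 performs only the permission and existence checks,
 * delivering nothing, which is the standard way to probe whether a PID is
 * still alive. The probe() helper below is made up for illustration.
 */
#if 0
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>

static void probe(pid_t pid)
{
	if (kill(pid, 0) == 0)
		printf("%d exists and is signalable\n", (int)pid);
	else if (errno == ESRCH)
		printf("%d does not exist\n", (int)pid);
	else if (errno == EPERM)
		printf("%d exists but we may not signal it\n", (int)pid);
}
#endif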

static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info = {};

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but it's not belonging to the target process anymore. This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
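
/*
 * Illustrative example (userspace, compiled out -- not part of this file):
 * older glibc releases ship no tgkill() wrapper, so the raw syscall is
 * used. Passing both the tgid and the tid closes the PID-reuse race
 * described above. The wrapper name is made up for illustration.
 */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int tgkill_wrapper(pid_t tgid, pid_t tid, int sig)
{
	return syscall(SYS_tgkill, tgid, tid, sig);
}

/* e.g. tgkill_wrapper(getpid(), worker_tid, SIGTERM); */
#endif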

/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	info->si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, info, pid);
}

/**
 *  sys_rt_sigqueueinfo - send signal information to a signal
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_sigqueueinfo(pid, sig, &info);
}
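
/*
 * Illustrative example (userspace, compiled out -- not part of this file):
 * the usual consumer of rt_sigqueueinfo is glibc's sigqueue(3), which
 * fills in si_code = SI_QUEUE plus the caller's payload; the si_code >= 0
 * check above is what keeps unprivileged callers from forging
 * kernel-generated codes at another process. notify() is a made-up name.
 */
#if 0
#include <signal.h>
#include <sys/types.h>

static int notify(pid_t pid, int value)
{
	union sigval sv = { .sival_int = value };

	/* Queues SIGUSR1 with a payload the receiver reads via si_value. */
	return sigqueue(pid, SIGUSR1, sv);
}
#endif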

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};
	int ret = copy_siginfo_from_user32(&info, uinfo);

	if (unlikely(ret))
		return ret;

	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif

static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif

/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;

	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
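
/*
 * Illustrative example (in-kernel, compiled out -- not part of this file):
 * kernel_sigaction() backs the allow_signal()/disallow_signal() helpers in
 * <linux/signal.h>. A sketch of a kthread opting in to SIGTERM; the thread
 * function name is made up, and <linux/kthread.h> is assumed.
 */
#if 0
static int example_kthread(void *unused)
{
	allow_signal(SIGTERM);	/* opt in: kthreads ignore signals by default */

	while (!kthread_should_stop()) {
		if (signal_pending(current)) {
			flush_signals(current);
			break;
		}
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif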

void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	sigaction_compat_abi(act, oact);

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
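
/*
 * Illustrative example (userspace, compiled out -- not part of this file):
 * the rt_sigaction entry points below funnel into do_sigaction(); from C
 * this is reached through sigaction(2). Note that, per the POSIX rule
 * quoted above, switching the disposition to SIG_IGN also discards any
 * already-pending instance of the signal. install() is a made-up name.
 */
#if 0
#include <signal.h>
#include <string.h>

static volatile sig_atomic_t got_sigint;

static void on_sigint(int sig)
{
	got_sigint = 1;
}

static int install(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_sigint;
	sigemptyset(&sa.sa_mask);	/* block nothing extra while handling */
	sa.sa_flags = SA_RESTART;	/* restart interruptible syscalls */
	return sigaction(SIGINT, &sa, NULL);
}
#endif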

static int
do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp)
{
	struct task_struct *t = current;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < MINSIGSTKSZ))
				return -ENOMEM;
		}

		t->sas_ss_sp = (unsigned long) ss_sp;
		t->sas_ss_size = ss_size;
		t->sas_ss_flags = ss_flags;
	}
	return 0;
}

SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
	stack_t new, old;
	int err;

	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;

	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			     current_user_stack_pointer());
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;

	return err;
}
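
/*
 * Illustrative example (userspace, compiled out -- not part of this file):
 * a handler for SIGSEGV caused by stack overflow can only run if it has an
 * alternate stack, which is exactly what sigaltstack(2) configures.
 * setup_altstack() and on_overflow() are made-up names.
 */
#if 0
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void on_overflow(int sig)
{
	_exit(1);	/* cannot safely continue after a stack overflow */
}

static int setup_altstack(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	if (!ss.ss_sp)
		return -1;
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (sigaltstack(&ss, NULL) < 0)
		return -1;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_overflow;
	sa.sa_flags = SA_ONSTACK;	/* run this handler on the alternate stack */
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGSEGV, &sa, NULL);
}
#endif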

int restore_altstack(const stack_t __user *uss)
{
	stack_t new;

	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer());
	/* squash all but EFAULT for now */
	return 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);

	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;

		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer());
	if (ret >= 0 && uoss_ptr)  {
		compat_stack_t old;

		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = compat_sys_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;

	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
	      __put_user(t->sas_ss_flags, &uss->ss_flags) |
	      __put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
#endif

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @set: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;
	int err = do_sigpending(&set);

	if (!err)
		err = put_user(set.sig[0], set32);
	return err;
}
#endif

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK

/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */
SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif
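
/*
 * Illustrative example (userspace, compiled out -- not part of this file):
 * whether libc lands in the legacy entry point above or in
 * rt_sigprocmask, the C-level idiom is the same: block a signal around a
 * critical section, then restore the previous mask. The function name is
 * made up for illustration.
 */
#if 0
#include <signal.h>

static void critical_section(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);
	sigprocmask(SIG_BLOCK, &block, &old);

	/* ... work that must not be interrupted by SIGINT ... */

	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore previous mask */
}
#endif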

#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal to be sent
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;

		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif
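
/*
 * Illustrative example (userspace, compiled out -- not part of this file):
 * the SA_ONESHOT | SA_NOMASK flags above give this kernel entry point its
 * historical System V semantics: the disposition resets to SIG_DFL on
 * delivery and the signal is not blocked inside its own handler. A
 * handler that wants to keep firing must therefore reinstall itself.
 */
#if 0
#include <signal.h>

static void on_alarm(int sig)
{
	signal(SIGALRM, on_alarm);	/* re-arm: one-shot semantics */
	/* ... */
}
#endif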

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 *  sys_rt_sigsuspend - replace the signal mask for a value with the
 *	@unewset value until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;

	return sigsuspend(&newset);
}
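
/*
 * Illustrative example (userspace, compiled out -- not part of this file):
 * sigsuspend(2) exists because "unblock, then pause()" is racy -- the
 * signal can arrive between the two calls and the pause() then sleeps
 * forever. Swapping the mask and sleeping atomically, as above, closes
 * that window. The function name is made up for illustration.
 */
#if 0
#include <signal.h>

static void wait_for_sigusr1(void)
{
	sigset_t block, suspend_mask;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &suspend_mask);	/* save old mask */

	/*
	 * Atomically unblock SIGUSR1 and sleep; assumes a SIGUSR1 handler
	 * is installed, otherwise the default action terminates us.
	 */
	sigdelset(&suspend_mask, SIGUSR1);
	sigsuspend(&suspend_mask);	/* returns -1 with errno == EINTR */
}
#endif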

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;

	return sigsuspend(&newset);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
		     != offsetof(struct siginfo, _sifields._pad));

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;

	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif