1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/slab.h>
14#include <linux/export.h>
15#include <linux/init.h>
16#include <linux/sched/mm.h>
17#include <linux/sched/user.h>
18#include <linux/sched/debug.h>
19#include <linux/sched/task.h>
20#include <linux/sched/task_stack.h>
21#include <linux/sched/cputime.h>
22#include <linux/fs.h>
23#include <linux/tty.h>
24#include <linux/binfmts.h>
25#include <linux/coredump.h>
26#include <linux/security.h>
27#include <linux/syscalls.h>
28#include <linux/ptrace.h>
29#include <linux/signal.h>
30#include <linux/signalfd.h>
31#include <linux/ratelimit.h>
32#include <linux/tracehook.h>
33#include <linux/capability.h>
34#include <linux/freezer.h>
35#include <linux/pid_namespace.h>
36#include <linux/nsproxy.h>
37#include <linux/user_namespace.h>
38#include <linux/uprobes.h>
39#include <linux/compat.h>
40#include <linux/cn_proc.h>
41#include <linux/compiler.h>
42#include <linux/posix-timers.h>
43
44#define CREATE_TRACE_POINTS
45#include <trace/events/signal.h>
46
47#include <asm/param.h>
48#include <linux/uaccess.h>
49#include <asm/unistd.h>
50#include <asm/siginfo.h>
51#include <asm/cacheflush.h>
52#include "audit.h"
53
54
55
56
57
/* Slab cache for struct sigqueue entries allocated by __sigqueue_alloc(). */
static struct kmem_cache *sigqueue_cachep;

/* Boot-param / sysctl knob: when non-zero, log details of fatal signals. */
int print_fatal_signals __read_mostly;
61
62static void __user *sig_handler(struct task_struct *t, int sig)
63{
64 return t->sighand->action[sig - 1].sa.sa_handler;
65}
66
67static int sig_handler_ignored(void __user *handler, int sig)
68{
69
70 return handler == SIG_IGN ||
71 (handler == SIG_DFL && sig_kernel_ignore(sig));
72}
73
/*
 * Would @t ignore @sig based on its installed handler?  @force (signals
 * from an ancestor pid namespace or SEND_SIG_FORCED) overrides the
 * SIGNAL_UNKILLABLE (init-like) protection.
 */
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* An unkillable (init-like) task ignores default-action signals
	 * unless the sender forces delivery. */
	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}
86
/*
 * Return non-zero if @sig, if sent to @t now, would simply be discarded.
 */
static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the signal handler
	 * may change by the time it is unblocked and actually dequeued.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}
105
106
107
108
109
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 * The switch on _NSIG_WORDS is resolved at compile time and simply
 * unrolls the word-wise "pending & ~blocked" test.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready !=	0;
}

/* Does sigpending set @p contain a deliverable signal given @b blocked? */
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
137
/*
 * Recompute TIF_SIGPENDING for @t.  Returns 1 (and sets the flag) when
 * any job-control action or deliverable signal is pending.
 */
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should
	 * do.
	 */
	return 0;
}
153
154
155
156
157
/*
 * After recalculating TIF_SIGPENDING, kick the task so it notices a
 * newly pending signal.  Callers must hold ->siglock.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	int pending = recalc_sigpending_tsk(t);

	if (pending)
		signal_wake_up(t, 0);
}
163
164void recalc_sigpending(void)
165{
166 if (!recalc_sigpending_tsk(current) && !freezing(current))
167 clear_thread_flag(TIF_SIGPENDING);
168
169}
170
171
172
/*
 * Synchronous (fault-generated) signals: next_signal() delivers these
 * ahead of any other pending signal in the same word.
 */
#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
176
/*
 * Return the lowest-numbered deliverable signal in @pending that is not
 * in @mask, preferring synchronous (fault) signals; 0 if none.
 */
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
222
223static inline void print_dropped_signal(int sig)
224{
225 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
226
227 if (!print_fatal_signals)
228 return;
229
230 if (!__ratelimit(&ratelimit_state))
231 return;
232
233 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
234 current->comm, current->pid, sig);
235}
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Clear @mask from @task->jobctl's stop-sigmask bits if requested and set
 * @mask.  Fails (returns %false) if the task is already dying.
 * Must be called with @task->sighand->siglock held.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
269
270
271
272
273
274
275
276
277
278
279
280
281
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  The barrier pairs with the waiter
 * checking the bit.  Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clearing JOBCTL_STOP_PENDING also clears the per-stop consume/dequeued
 * state.  When no pending bits remain, TRAPPING is cleared too so a
 * waiting ptracer is released.
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * Clears the task's STOP_PENDING state and, if it still owed a "consume"
 * for the in-progress group stop, decrements group_stop_count.  Returns
 * %true if this was the last thread to stop, i.e. the group stop just
 * completed and SIGNAL_STOP_STOPPED was set.
 * Must be called with @task->sighand->siglock held.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
360
361
362
363
364
365
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials.  This can go away when all
	 * callers hold rcu read lock.  The sigpending count is charged
	 * up-front and rolled back below if the allocation fails.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
400
401static void __sigqueue_free(struct sigqueue *q)
402{
403 if (q->flags & SIGQUEUE_PREALLOC)
404 return;
405 atomic_dec(&q->user->sigpending);
406 free_uid(q->user);
407 kmem_cache_free(sigqueue_cachep, q);
408}
409
410void flush_sigqueue(struct sigpending *queue)
411{
412 struct sigqueue *q;
413
414 sigemptyset(&queue->signal);
415 while (!list_empty(&queue->list)) {
416 q = list_entry(queue->list.next, struct sigqueue , list);
417 list_del_init(&q->list);
418 __sigqueue_free(q);
419 }
420}
421
422
423
424
425void flush_signals(struct task_struct *t)
426{
427 unsigned long flags;
428
429 spin_lock_irqsave(&t->sighand->siglock, flags);
430 clear_tsk_thread_flag(t, TIF_SIGPENDING);
431 flush_sigqueue(&t->pending);
432 flush_sigqueue(&t->signal->shared_pending);
433 spin_unlock_irqrestore(&t->sighand->siglock, flags);
434}
435
436#ifdef CONFIG_POSIX_TIMERS
/*
 * Remove all SI_TIMER entries from @pending while retaining every other
 * queued signal and its bit in the pending mask.
 */
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	/* Keep a bit set if a non-timer entry for that signal remains. */
	sigorsets(&pending->signal, &signal, &retain);
}
459
/*
 * Flush queued itimer (SI_TIMER) signals from current's private and
 * shared pending queues.
 */
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
470#endif
471
472void ignore_signals(struct task_struct *t)
473{
474 int i;
475
476 for (i = 0; i < _NSIG; ++i)
477 t->sighand->action[i].sa.sa_handler = SIG_IGN;
478
479 flush_signals(t);
480}
481
482
483
484
485
486void
487flush_signal_handlers(struct task_struct *t, int force_default)
488{
489 int i;
490 struct k_sigaction *ka = &t->sighand->action[0];
491 for (i = _NSIG ; i != 0 ; i--) {
492 if (force_default || ka->sa.sa_handler != SIG_IGN)
493 ka->sa.sa_handler = SIG_DFL;
494 ka->sa.sa_flags = 0;
495#ifdef __ARCH_HAS_SA_RESTORER
496 ka->sa.sa_restorer = NULL;
497#endif
498 sigemptyset(&ka->sa.sa_mask);
499 ka++;
500 }
501}
502
/*
 * Return non-zero if @sig has no userspace handler in @tsk (and no tracer
 * that might intercept it).  Global init counts as always unhandled.
 */
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
513
/*
 * Pull the siginfo for @sig off @list into @info.  *@resched_timer is set
 * when the entry belongs to a POSIX timer that must be rearmed by the
 * caller (see dequeue_signal()).
 */
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	/* Only one entry: the signal is no longer pending after this. */
	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		/* NOTE: the goto above skips the sigdelset() on purpose —
		 * another entry for @sig remains queued. */
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
557
558static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
559 siginfo_t *info, bool *resched_timer)
560{
561 int sig = next_signal(pending, mask);
562
563 if (sig)
564 collect_signal(sig, pending, info, resched_timer);
565 return sig;
566}
567
568
569
570
571
572
573
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case.  This is
		 * compliant with the old way of self-restarting itimers,
		 * as the SIGALRM is a legacy signal and only queued once.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);
	}
#endif
	return signr;
}
647
648
649
650
651
652
653
654
655
656
657
658
/*
 * Tell a process that it has a new active signal: set TIF_SIGPENDING and
 * wake it (or, if it is already running on another CPU, kick it so it
 * re-checks the flag on its way back to user mode).
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case.  We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
672
673
674
675
676
677
678
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
697
/* The special siginfo values (SEND_SIG_*) are small integers cast to
 * pointers, so a simple range check identifies them. */
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}
702
/* Does this siginfo describe a signal sent from user space? */
static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
708
709
710
711
712static int kill_ok_by_cred(struct task_struct *t)
713{
714 const struct cred *cred = current_cred();
715 const struct cred *tcred = __task_cred(t);
716
717 if (uid_eq(cred->euid, tcred->suid) ||
718 uid_eq(cred->euid, tcred->uid) ||
719 uid_eq(cred->uid, tcred->suid) ||
720 uid_eq(cred->uid, tcred->uid))
721 return 1;
722
723 if (ns_capable(tcred->user_ns, CAP_KILL))
724 return 1;
725
726 return 0;
727}
728
729
730
731
732
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through — SIGCONT to another session needs
			 * the same permission as any other signal */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.  Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
793
794
795
796
797
798
799
800
801
802
803
/*
 * Handle magic semantics of various signals (stop vs. SIGCONT interplay,
 * dying process) and decide whether the signal should be delivered at
 * all.  Returns true if the signal should be actually delivered,
 * otherwise it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued.  Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent.  See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
868
869
870
871
872
873
874
875
876
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
889
/*
 * Choose a thread to take the just-queued @sig off the queue and wake it;
 * escalate a fatal signal to a whole-group SIGKILL where appropriate.
 */
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we allow other threads to handle the
			 * demise of the process, while handling the last
			 * signal delivered to the group leader.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
965
/* Legacy (non-realtime) signals coalesce: at most one instance pending. */
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
970
#ifdef CONFIG_USER_NS
/*
 * Translate the sender's si_uid into the receiving task's user namespace,
 * so userspace sees a meaningful uid.  Kernel-generated siginfo is left
 * untouched.
 */
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
/* No user namespaces: nothing to translate. */
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
991
/*
 * Core signal-sending path: queue @sig (with @info) on @t's private or
 * shared pending set and pick a thread to deliver it.  Caller holds
 * @t->sighand->siglock.
 */
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
1095
/*
 * Wrapper around __send_signal() that detects whether the sender lives
 * in an ancestor pid namespace (in which case the target cannot see the
 * sender's pid and si_pid is cleared).
 */
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	/* A sender invisible in @t's pid namespace has a vnr of 0 there. */
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
1108
/*
 * Log a fatal signal: number, and on x86-32, the code bytes at the
 * faulting instruction pointer, followed by a register dump.
 */
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
1132
/* Parse the "print-fatal-signals=" boot parameter. */
static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
1141
/* Send @sig to the whole thread group of @p.  Caller holds siglock. */
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}
1147
/* Send @sig to the specific thread @t only.  Caller holds siglock. */
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
1153
/*
 * Send @sig to @p (thread-group wide when @group), taking the sighand
 * lock safely against exit.  Returns -ESRCH if the task is already gone.
 */
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
1208
1209
1210
1211
/*
 * Nuke all other threads in the group.  Returns the number of sibling
 * threads (dead or alive) other than @p.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
1232
/*
 * Safely take @tsk->sighand->siglock while @tsk may be exiting and its
 * ->sighand changing underneath us.  Returns the locked sighand, or NULL
 * if the task has no sighand any more.
 */
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable irqs early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal().  In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
1274
1275
1276
1277
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	/* sig == 0 is a permission probe; don't actually send anything. */
	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}
1291
1292
1293
1294
1295
1296
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc).  Returns 0 if any send succeeded,
 * otherwise the last error (or -ESRCH if the group was empty).
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
1311
/* Send @sig to the thread group identified by @pid. */
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
1333
/* Resolve @pid in the current pid namespace and signal that group. */
static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	/* find_vpid() requires the RCU read lock. */
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
1342
1343static int kill_as_cred_perm(const struct cred *cred,
1344 struct task_struct *target)
1345{
1346 const struct cred *pcred = __task_cred(target);
1347 if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1348 !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
1349 return 0;
1350 return 1;
1351}
1352
1353
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1390
1391
1392
1393
1394
1395
1396
1397
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		/* pid == 0: current's process group; pid < -1: group -pid. */
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		/* pid == -1: everything except init and ourselves. */
		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
1436
1437
1438
1439
1440
/*
 * These are for backward compatibility with the rest of the kernel source.
 */
int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}
1452
/* Map the kernel-internal/userspace distinction to a special siginfo. */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1455
/* Send @sig to thread @p; @priv marks it as kernel-originated. */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
1461
/* Force-deliver @sig to @p, overriding blocking/ignoring. */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
1467
1468
1469
1470
1471
1472
1473
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV.  And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
1486
/* Send @sig to the process group @pid; @priv marks kernel origin. */
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);
1498
/* Send @sig to the thread group @pid; @priv marks kernel origin. */
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
1523
1524void sigqueue_free(struct sigqueue *q)
1525{
1526 unsigned long flags;
1527 spinlock_t *lock = ¤t->sighand->siglock;
1528
1529 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1530
1531
1532
1533
1534
1535 spin_lock_irqsave(lock, flags);
1536 q->flags &= ~SIGQUEUE_PREALLOC;
1537
1538
1539
1540
1541 if (!list_empty(&q->list))
1542 q = NULL;
1543 spin_unlock_irqrestore(lock, flags);
1544
1545 if (q)
1546 __sigqueue_free(q);
1547}
1548
/*
 * Queue a preallocated sigqueue (POSIX timer) entry for delivery to @t.
 * Returns 0 on success, 1 if the signal is ignored, -1 if @t is gone.
 */
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queue just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
1592
1593
1594
1595
1596
1597
1598
1599
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
/*
 * Notify a parent that a child stopped, continued or trapped.
 * @for_ptracer selects the tracer (tsk->parent); otherwise the group
 * leader's real parent is notified.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/* si_pid/si_uid are translated into the parent's namespaces. */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
1759
1760static inline int may_ptrace_stop(void)
1761{
1762 if (!likely(current->ptrace))
1763 return 0;
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777 if (unlikely(current->mm->core_state) &&
1778 unlikely(current->mm == current->parent->mm))
1779 return 0;
1780
1781 return 1;
1782}
1783
1784
1785
1786
1787
1788static int sigkill_pending(struct task_struct *tsk)
1789{
1790 return sigismember(&tsk->pending.signal, SIGKILL) ||
1791 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1792}
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1806 __releases(¤t->sighand->siglock)
1807 __acquires(¤t->sighand->siglock)
1808{
1809 bool gstop_done = false;
1810
1811 if (arch_ptrace_stop_needed(exit_code, info)) {
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823 spin_unlock_irq(¤t->sighand->siglock);
1824 arch_ptrace_stop(exit_code, info);
1825 spin_lock_irq(¤t->sighand->siglock);
1826 if (sigkill_pending(current))
1827 return;
1828 }
1829
1830
1831
1832
1833
1834
1835
1836
1837 set_current_state(TASK_TRACED);
1838
1839 current->last_siginfo = info;
1840 current->exit_code = exit_code;
1841
1842
1843
1844
1845
1846
1847
1848
1849 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1850 gstop_done = task_participate_group_stop(current);
1851
1852
1853 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1854 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1855 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1856
1857
1858 task_clear_jobctl_trapping(current);
1859
1860 spin_unlock_irq(¤t->sighand->siglock);
1861 read_lock(&tasklist_lock);
1862 if (may_ptrace_stop()) {
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873 do_notify_parent_cldstop(current, true, why);
1874 if (gstop_done && ptrace_reparented(current))
1875 do_notify_parent_cldstop(current, false, why);
1876
1877
1878
1879
1880
1881
1882
1883 preempt_disable();
1884 read_unlock(&tasklist_lock);
1885 preempt_enable_no_resched();
1886 freezable_schedule();
1887 } else {
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898 if (gstop_done)
1899 do_notify_parent_cldstop(current, false, why);
1900
1901
1902 __set_current_state(TASK_RUNNING);
1903 if (clear_code)
1904 current->exit_code = 0;
1905 read_unlock(&tasklist_lock);
1906 }
1907
1908
1909
1910
1911
1912
1913 spin_lock_irq(¤t->sighand->siglock);
1914 current->last_siginfo = NULL;
1915
1916
1917 current->jobctl &= ~JOBCTL_LISTENING;
1918
1919
1920
1921
1922
1923
1924 recalc_sigpending_tsk(current);
1925}
1926
1927static void ptrace_do_notify(int signr, int exit_code, int why)
1928{
1929 siginfo_t info;
1930
1931 memset(&info, 0, sizeof info);
1932 info.si_signo = signr;
1933 info.si_code = exit_code;
1934 info.si_pid = task_pid_vnr(current);
1935 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1936
1937
1938 ptrace_stop(exit_code, why, 1, &info);
1939}
1940
1941void ptrace_notify(int exit_code)
1942{
1943 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1944 if (unlikely(current->task_works))
1945 task_work_run();
1946
1947 spin_lock_irq(¤t->sighand->siglock);
1948 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1949 spin_unlock_irq(¤t->sighand->siglock);
1950}
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974static bool do_signal_stop(int signr)
1975 __releases(¤t->sighand->siglock)
1976{
1977 struct signal_struct *sig = current->signal;
1978
1979 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
1980 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
1981 struct task_struct *t;
1982
1983
1984 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
1985
1986 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
1987 unlikely(signal_group_exit(sig)))
1988 return false;
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2009 sig->group_exit_code = signr;
2010
2011 sig->group_stop_count = 0;
2012
2013 if (task_set_jobctl_pending(current, signr | gstop))
2014 sig->group_stop_count++;
2015
2016 t = current;
2017 while_each_thread(current, t) {
2018
2019
2020
2021
2022
2023 if (!task_is_stopped(t) &&
2024 task_set_jobctl_pending(t, signr | gstop)) {
2025 sig->group_stop_count++;
2026 if (likely(!(t->ptrace & PT_SEIZED)))
2027 signal_wake_up(t, 0);
2028 else
2029 ptrace_trap_notify(t);
2030 }
2031 }
2032 }
2033
2034 if (likely(!current->ptrace)) {
2035 int notify = 0;
2036
2037
2038
2039
2040
2041
2042 if (task_participate_group_stop(current))
2043 notify = CLD_STOPPED;
2044
2045 __set_current_state(TASK_STOPPED);
2046 spin_unlock_irq(¤t->sighand->siglock);
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057 if (notify) {
2058 read_lock(&tasklist_lock);
2059 do_notify_parent_cldstop(current, false, notify);
2060 read_unlock(&tasklist_lock);
2061 }
2062
2063
2064 freezable_schedule();
2065 return true;
2066 } else {
2067
2068
2069
2070
2071 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2072 return false;
2073 }
2074}
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
/*
 * Take the pending job-control trap.  For a PT_SEIZED tracee this is
 * reported as a PTRACE_EVENT_STOP trap (using SIGTRAP when there is no
 * group stop in effect); otherwise it is a plain ptrace stop with the
 * pending stop signal.  Called with siglock held.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
2109
2110static int ptrace_signal(int signr, siginfo_t *info)
2111{
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2122 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2123
2124
2125 signr = current->exit_code;
2126 if (signr == 0)
2127 return signr;
2128
2129 current->exit_code = 0;
2130
2131
2132
2133
2134
2135
2136
2137 if (signr != info->si_signo) {
2138 info->si_signo = signr;
2139 info->si_errno = 0;
2140 info->si_code = SI_USER;
2141 rcu_read_lock();
2142 info->si_pid = task_pid_vnr(current->parent);
2143 info->si_uid = from_kuid_munged(current_user_ns(),
2144 task_uid(current->parent));
2145 rcu_read_unlock();
2146 }
2147
2148
2149 if (sigismember(¤t->blocked, signr)) {
2150 specific_send_sig_info(signr, info, current);
2151 signr = 0;
2152 }
2153
2154 return signr;
2155}
2156
2157int get_signal(struct ksignal *ksig)
2158{
2159 struct sighand_struct *sighand = current->sighand;
2160 struct signal_struct *signal = current->signal;
2161 int signr;
2162
2163 if (unlikely(current->task_works))
2164 task_work_run();
2165
2166 if (unlikely(uprobe_deny_signal()))
2167 return 0;
2168
2169
2170
2171
2172
2173
2174 try_to_freeze();
2175
2176relock:
2177 spin_lock_irq(&sighand->siglock);
2178
2179
2180
2181
2182
2183 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2184 int why;
2185
2186 if (signal->flags & SIGNAL_CLD_CONTINUED)
2187 why = CLD_CONTINUED;
2188 else
2189 why = CLD_STOPPED;
2190
2191 signal->flags &= ~SIGNAL_CLD_MASK;
2192
2193 spin_unlock_irq(&sighand->siglock);
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203 read_lock(&tasklist_lock);
2204 do_notify_parent_cldstop(current, false, why);
2205
2206 if (ptrace_reparented(current->group_leader))
2207 do_notify_parent_cldstop(current->group_leader,
2208 true, why);
2209 read_unlock(&tasklist_lock);
2210
2211 goto relock;
2212 }
2213
2214 for (;;) {
2215 struct k_sigaction *ka;
2216
2217 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2218 do_signal_stop(0))
2219 goto relock;
2220
2221 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2222 do_jobctl_trap();
2223 spin_unlock_irq(&sighand->siglock);
2224 goto relock;
2225 }
2226
2227 signr = dequeue_signal(current, ¤t->blocked, &ksig->info);
2228
2229 if (!signr)
2230 break;
2231
2232 if (unlikely(current->ptrace) && signr != SIGKILL) {
2233 signr = ptrace_signal(signr, &ksig->info);
2234 if (!signr)
2235 continue;
2236 }
2237
2238 ka = &sighand->action[signr-1];
2239
2240
2241 trace_signal_deliver(signr, &ksig->info, ka);
2242
2243 if (ka->sa.sa_handler == SIG_IGN)
2244 continue;
2245 if (ka->sa.sa_handler != SIG_DFL) {
2246
2247 ksig->ka = *ka;
2248
2249 if (ka->sa.sa_flags & SA_ONESHOT)
2250 ka->sa.sa_handler = SIG_DFL;
2251
2252 break;
2253 }
2254
2255
2256
2257
2258 if (sig_kernel_ignore(signr))
2259 continue;
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2272 !sig_kernel_only(signr))
2273 continue;
2274
2275 if (sig_kernel_stop(signr)) {
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286 if (signr != SIGSTOP) {
2287 spin_unlock_irq(&sighand->siglock);
2288
2289
2290
2291 if (is_current_pgrp_orphaned())
2292 goto relock;
2293
2294 spin_lock_irq(&sighand->siglock);
2295 }
2296
2297 if (likely(do_signal_stop(ksig->info.si_signo))) {
2298
2299 goto relock;
2300 }
2301
2302
2303
2304
2305
2306 continue;
2307 }
2308
2309 spin_unlock_irq(&sighand->siglock);
2310
2311
2312
2313
2314 current->flags |= PF_SIGNALED;
2315
2316 if (sig_kernel_coredump(signr)) {
2317 if (print_fatal_signals)
2318 print_fatal_signal(ksig->info.si_signo);
2319 proc_coredump_connector(current);
2320
2321
2322
2323
2324
2325
2326
2327
2328 do_coredump(&ksig->info);
2329 }
2330
2331
2332
2333
2334 do_group_exit(ksig->info.si_signo);
2335
2336 }
2337 spin_unlock_irq(&sighand->siglock);
2338
2339 ksig->sig = signr;
2340 return ksig->sig > 0;
2341}
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353static void signal_delivered(struct ksignal *ksig, int stepping)
2354{
2355 sigset_t blocked;
2356
2357
2358
2359
2360
2361 clear_restore_sigmask();
2362
2363 sigorsets(&blocked, ¤t->blocked, &ksig->ka.sa.sa_mask);
2364 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2365 sigaddset(&blocked, ksig->sig);
2366 set_current_blocked(&blocked);
2367 tracehook_signal_handler(stepping);
2368}
2369
2370void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2371{
2372 if (failed)
2373 force_sigsegv(ksig->sig, current);
2374 else
2375 signal_delivered(ksig, stepping);
2376}
2377
2378
2379
2380
2381
2382
/*
 * @tsk stops watching the signals in @which (it is blocking them or
 * exiting): make sure every group-shared pending signal in that set has
 * some other live thread woken up to take it.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		/* Stop early once every signal has a taker. */
		if (sigisemptyset(&retarget))
			break;
	}
}
2409
/*
 * Called early in task exit: set PF_EXITING (making the task invisible
 * to group-wide signal delivery), retarget shared pending signals we
 * were going to take, and - if we were the last straggler of a group
 * stop - notify the parent.
 */
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect a stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	/* Hand off shared-pending signals we could have handled. */
	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
2459
2460EXPORT_SYMBOL(recalc_sigpending);
2461EXPORT_SYMBOL_GPL(dequeue_signal);
2462EXPORT_SYMBOL(flush_signals);
2463EXPORT_SYMBOL(force_sig);
2464EXPORT_SYMBOL(send_sig);
2465EXPORT_SYMBOL(send_sig_info);
2466EXPORT_SYMBOL(sigprocmask);
2467
2468
2469
2470
2471
2472
2473
2474
2475SYSCALL_DEFINE0(restart_syscall)
2476{
2477 struct restart_block *restart = ¤t->restart_block;
2478 return restart->fn(restart);
2479}
2480
/*
 * restart_block callback for syscalls that must not be restarted:
 * always fail the restart with -EINTR.
 */
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
2485
2486static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2487{
2488 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2489 sigset_t newblocked;
2490
2491 sigandnsets(&newblocked, newset, ¤t->blocked);
2492 retarget_shared_pending(tsk, &newblocked);
2493 }
2494 tsk->blocked = *newset;
2495 recalc_sigpending();
2496}
2497
2498
2499
2500
2501
2502
2503
2504
/*
 * Change current->blocked to @newset, silently refusing to block
 * SIGKILL or SIGSTOP (@newset is modified in place).
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}
2510
/*
 * Like set_current_blocked() but does not filter out SIGKILL/SIGSTOP;
 * callers must already have done so.
 */
void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do.  current->blocked is not modified by other tasks, so the
	 * unlocked comparison is safe.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2536{
2537 struct task_struct *tsk = current;
2538 sigset_t newset;
2539
2540
2541 if (oldset)
2542 *oldset = tsk->blocked;
2543
2544 switch (how) {
2545 case SIG_BLOCK:
2546 sigorsets(&newset, &tsk->blocked, set);
2547 break;
2548 case SIG_UNBLOCK:
2549 sigandnsets(&newset, &tsk->blocked, set);
2550 break;
2551 case SIG_SETMASK:
2552 newset = *set;
2553 break;
2554 default:
2555 return -EINVAL;
2556 }
2557
2558 __set_current_blocked(&newset);
2559 return 0;
2560}
2561
2562
2563
2564
2565
2566
2567
2568
/**
 * sys_rt_sigprocmask - change the list of currently blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: new set of blocked signals, or NULL to only query
 * @oset: previous value of the signal mask if non-NULL
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		/* SIGKILL and SIGSTOP can never be blocked. */
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}
2598
#ifdef CONFIG_COMPAT
/*
 * Compat version of rt_sigprocmask: on big-endian the sigset layout
 * differs between compat and native, so the set is converted here; on
 * little-endian the layouts match and the native syscall is reused.
 */
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		compat_sigset_t new32;
		sigset_t new_set;
		int error;
		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
			return -EFAULT;

		sigset_from_compat(&new_set, &new32);
		/* SIGKILL and SIGSTOP can never be blocked. */
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	if (oset) {
		compat_sigset_t old32;
		sigset_to_compat(&old32, &old_set);
		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
#else
	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
				  (sigset_t __user *)oset, sigsetsize);
#endif
}
#endif
2637
2638static int do_sigpending(void *set, unsigned long sigsetsize)
2639{
2640 if (sigsetsize > sizeof(sigset_t))
2641 return -EINVAL;
2642
2643 spin_lock_irq(¤t->sighand->siglock);
2644 sigorsets(set, ¤t->pending.signal,
2645 ¤t->signal->shared_pending.signal);
2646 spin_unlock_irq(¤t->sighand->siglock);
2647
2648
2649 sigandsets(set, ¤t->blocked, set);
2650 return 0;
2651}
2652
2653
2654
2655
2656
2657
2658
/**
 * sys_rt_sigpending - examine pending signals that are blocked
 * @uset: user buffer that receives the pending set
 * @sigsetsize: size of sigset_t type or smaller
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);
	/*
	 * NOTE(review): only @sigsetsize bytes of @set are copied out -
	 * presumably callers always pass sizeof(sigset_t); confirm short
	 * sizes are intended to truncate the reported set.
	 */
	if (!err && copy_to_user(uset, &set, sigsetsize))
		err = -EFAULT;
	return err;
}
2667
#ifdef CONFIG_COMPAT
/*
 * Compat version of rt_sigpending: converts the native set to compat
 * layout on big-endian; reuses the native syscall otherwise.
 */
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);
	if (!err) {
		compat_sigset_t set32;
		sigset_to_compat(&set32, &set);
		/* we can get here only if sigsetsize <= sizeof(set) */
		if (copy_to_user(uset, &set32, sigsetsize))
			err = -EFAULT;
	}
	return err;
#else
	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
#endif
}
#endif
2688
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

/*
 * Copy a siginfo_t to user space member by member, writing only the
 * fields that are meaningful for the (non-negative) si_code class so
 * that no kernel padding leaks.  Kernel-private codes (si_code < 0)
 * are copied verbatim.
 */
int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure this code is
	 * fixed accordingly.  It should never copy any pad contained in
	 * the structure to avoid security leaks, but must copy the
	 * generic fields plus the class-specific union members.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_signo == SIGBUS &&
		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
#ifdef SEGV_BNDERR
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
			err |= __put_user(from->si_lower, &to->si_lower);
			err |= __put_user(from->si_upper, &to->si_upper);
		}
#endif
#ifdef SEGV_PKUERR
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
			err |= __put_user(from->si_pkey, &to->si_pkey);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT:
	case __SI_MESGQ:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
#ifdef __ARCH_SIGSYS
	case __SI_SYS:
		err |= __put_user(from->si_call_addr, &to->si_call_addr);
		err |= __put_user(from->si_syscall, &to->si_syscall);
		err |= __put_user(from->si_arch, &to->si_arch);
		break;
#endif
	default: /* just the common pid/uid pair for unknown classes */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
2780
2781
2782
2783
2784
2785
2786
/**
 * do_sigtimedwait - wait for queued signals specified in @which
 * @which: queued signals to wait for
 * @info: if non-null, the signal's siginfo is returned here
 * @ts: upper bound on process time suspension, NULL for no timeout
 *
 * Returns the dequeued signal number, -EAGAIN on timeout, or -EINTR if
 * interrupted by an unrelated signal.
 */
static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
		    const struct timespec *ts)
{
	ktime_t *to = NULL, timeout = KTIME_MAX;
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of "interesting" signals into a blocked-style
	 * mask; SIGKILL/SIGSTOP cannot be waited for.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready: temporarily unblock the signals we're
		 * interested in while sleeping so we are woken when they
		 * arrive.  The old mask is saved in real_blocked so that
		 * sig_task_ignored()/delivery treat them as still blocked.
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		spin_lock_irq(&tsk->sighand->siglock);
		/* Restore the original mask and retry the dequeue. */
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return ret ? -EINTR : -EAGAIN;
}
2836
2837
2838
2839
2840
2841
2842
2843
2844
/**
 * sys_rt_sigtimedwait - synchronously wait for queued signals
 * @uthese: queued signals to wait for
 * @uinfo: if non-null, the signal's siginfo is returned here
 * @uts: upper bound on process time suspension
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	/* Positive return means a signal was dequeued; export its info. */
	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
2875
#ifdef CONFIG_COMPAT
/*
 * Compat version of rt_sigtimedwait: converts the sigset, timespec and
 * siginfo between compat and native layouts around do_sigtimedwait().
 */
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
{
	compat_sigset_t s32;
	sigset_t s;
	struct timespec t;
	siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&s, &s32);

	if (uts) {
		if (compat_get_timespec(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif
2909
2910
2911
2912
2913
2914
/**
 * sys_kill - send a signal to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* kill_something_info() handles pid > 0, 0, -1 and < -1 cases. */
	return kill_something_info(sig, &info, pid);
}
2927
/*
 * Send @sig to the thread with vpid @pid; when @tgid > 0 the thread
 * must also belong to that thread group.  @sig == 0 only performs the
 * existence/permission probe.
 */
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal.  The window is
			 * tiny, and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
2957
2958static int do_tkill(pid_t tgid, pid_t pid, int sig)
2959{
2960 struct siginfo info = {};
2961
2962 info.si_signo = sig;
2963 info.si_errno = 0;
2964 info.si_code = SI_TKILL;
2965 info.si_pid = task_tgid_vnr(current);
2966 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2967
2968 return do_send_specific(tgid, pid, sig, &info);
2969}
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * Also checks @tgid, so -ESRCH is returned even when the PID exists but
 * no longer belongs to the target process (guards against PID reuse).
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
2989
2990
2991
2992
2993
2994
2995
2996
/**
 * sys_tkill - send signal to one specific task
 * @pid: the PID of the task
 * @sig: signal to be sent
 *
 * Sends @sig to the specific thread with the given vpid (tgid is not
 * checked; prefer tgkill()).
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
3005
/*
 * Worker for rt_sigqueueinfo(): queue @info as @sig against process
 * @pid, after rejecting attempts to forge kernel- or kill()-style
 * siginfo at another process.
 */
static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	info->si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, info, pid);
}
3020
3021
3022
3023
3024
3025
3026
/**
 * sys_rt_sigqueueinfo - queue a signal with full siginfo to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 * @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;
	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
3035
#ifdef CONFIG_COMPAT
/*
 * Compat version of rt_sigqueueinfo: converts the 32-bit siginfo to
 * native layout before queueing.
 */
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};
	int ret = copy_siginfo_from_user32(&info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif
3049
/*
 * Worker for rt_tgsigqueueinfo(): like do_rt_sigqueueinfo() but the
 * target is a specific thread @pid inside thread group @tgid.
 */
static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}
3067
/**
 * sys_rt_tgsigqueueinfo - queue a signal with full siginfo to a thread
 * @tgid: the thread group ID of the target thread
 * @pid: the PID of the target thread
 * @sig: signal to be sent
 * @uinfo: signal info to be sent
 */
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
3078
#ifdef CONFIG_COMPAT
/*
 * Compat version of rt_tgsigqueueinfo: converts the 32-bit siginfo to
 * native layout before delivery.
 */
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
3093
3094
3095
3096
3097void kernel_sigaction(int sig, __sighandler_t action)
3098{
3099 spin_lock_irq(¤t->sighand->siglock);
3100 current->sighand->action[sig - 1].sa.sa_handler = action;
3101 if (action == SIG_IGN) {
3102 sigset_t mask;
3103
3104 sigemptyset(&mask);
3105 sigaddset(&mask, sig);
3106
3107 flush_sigqueue_mask(&mask, ¤t->signal->shared_pending);
3108 flush_sigqueue_mask(&mask, ¤t->pending);
3109 recalc_sigpending();
3110 }
3111 spin_unlock_irq(¤t->sighand->siglock);
3112}
3113EXPORT_SYMBOL(kernel_sigaction);
3114
/*
 * Arch hook to fix up a sigaction for compat tasks.  This __weak
 * default is a no-op; architectures may override it.
 */
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}
3119
/*
 * Install a new disposition @act for @sig (returning the old one via
 * @oact).  When the new disposition makes the signal ignored, pending
 * instances are discarded from every thread, per POSIX.
 */
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	/* SIGKILL/SIGSTOP (sig_kernel_only) dispositions cannot change. */
	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	sigaction_compat_abi(act, oact);

	if (act) {
		/* SIGKILL and SIGSTOP can never be masked. */
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked."
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
3164
/*
 * Query (@oss) and/or change (@ss) the alternate signal stack.  @sp is
 * the current user stack pointer, used to refuse changes while running
 * on the altstack and to compute the reported ss_flags.
 */
static int
do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp)
{
	struct task_struct *t = current;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		/* Cannot change the stack we are currently executing on. */
		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		/* Only SS_DISABLE, SS_ONSTACK or 0 plus SS_FLAG_BITS. */
		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < MINSIGSTKSZ))
				return -ENOMEM;
		}

		t->sas_ss_sp = (unsigned long) ss_sp;
		t->sas_ss_size = ss_size;
		t->sas_ss_flags = ss_flags;
	}
	return 0;
}
3206
/**
 * sys_sigaltstack - set and/or get the alternate signal stack
 * @uss: new alternate stack description, or NULL to only query
 * @uoss: receives the previous alternate stack, or NULL
 */
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
	stack_t new, old;
	int err;
	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			      current_user_stack_pointer());
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}
3219
/*
 * Restore the alternate stack saved on a signal frame at sigreturn.
 * do_sigaltstack() errors are deliberately squashed: it is too late to
 * fail the sigreturn.
 */
int restore_altstack(const stack_t __user *uss)
{
	stack_t new;
	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer());
	/* squash all but EFAULT for now */
	return 0;
}
3229
/*
 * Write the current task's altstack settings into the user-space
 * stack_t @uss (presumably part of a signal frame being set up by arch
 * code — callers are not visible here).  If SS_AUTODISARM is set, the
 * altstack is then disarmed via sas_ss_reset().
 */
int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	/* OR the __put_user() results together: non-zero means a fault. */
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
3242
3243#ifdef CONFIG_COMPAT
/*
 * 32-bit compat sigaltstack(2): translate between compat_stack_t
 * (compat_uptr_t pointer) and the native stack_t around
 * do_sigaltstack().
 */
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;
		/* Pull in the 32-bit descriptor and widen it. */
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	/* Always fetch the old settings; copy out only if requested. */
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			compat_user_stack_pointer());
	if (ret >= 0 && uoss_ptr) {
		compat_stack_t old;
		/* Zero first so padding never leaks kernel stack data. */
		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}
3272
/*
 * Compat counterpart of restore_altstack(): reinstall the altstack via
 * the compat syscall.  Every error except -EFAULT is deliberately
 * squashed to 0, matching restore_altstack().
 */
int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = compat_sys_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}
3279
/*
 * Compat counterpart of __save_altstack(): write the current altstack
 * settings into a user-space compat_stack_t, narrowing the stack
 * pointer with ptr_to_compat().  If SS_AUTODISARM is set, the altstack
 * is then disarmed via sas_ss_reset().
 */
int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	/* OR the __put_user() results together: non-zero means a fault. */
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
3294#endif
3295
3296#ifdef __ARCH_WANT_SYS_SIGPENDING
3297
3298
3299
3300
3301
/*
 * Legacy sigpending(2): implemented on top of rt_sigpending() with the
 * smaller old_sigset_t size, so only the first word of the pending set
 * is reported.
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}
3306
3307#ifdef CONFIG_COMPAT
/*
 * Compat legacy sigpending(2).  On big-endian the first native sigset
 * word must be extracted explicitly; on little-endian the user buffer
 * is handed straight to the rt variant — presumably because the compat
 * and native layouts coincide there (NOTE(review): confirm for any new
 * architecture).
 */
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
#ifdef __BIG_ENDIAN
	sigset_t set;
	/* Fetch just the first word of the pending set. */
	int err = do_sigpending(&set, sizeof(set.sig[0]));
	if (!err)
		err = put_user(set.sig[0], set32);
	return err;
#else
	return sys_rt_sigpending((sigset_t __user *)set32, sizeof(*set32));
#endif
}
3320#endif
3321
3322#endif
3323
3324#ifdef __ARCH_WANT_SYS_SIGPROCMASK
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3336 old_sigset_t __user *, oset)
3337{
3338 old_sigset_t old_set, new_set;
3339 sigset_t new_blocked;
3340
3341 old_set = current->blocked.sig[0];
3342
3343 if (nset) {
3344 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3345 return -EFAULT;
3346
3347 new_blocked = current->blocked;
3348
3349 switch (how) {
3350 case SIG_BLOCK:
3351 sigaddsetmask(&new_blocked, new_set);
3352 break;
3353 case SIG_UNBLOCK:
3354 sigdelsetmask(&new_blocked, new_set);
3355 break;
3356 case SIG_SETMASK:
3357 new_blocked.sig[0] = new_set;
3358 break;
3359 default:
3360 return -EINVAL;
3361 }
3362
3363 set_current_blocked(&new_blocked);
3364 }
3365
3366 if (oset) {
3367 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3368 return -EFAULT;
3369 }
3370
3371 return 0;
3372}
3373#endif
3374
3375#ifndef CONFIG_ODD_RT_SIGACTION
3376
3377
3378
3379
3380
3381
3382
3383SYSCALL_DEFINE4(rt_sigaction, int, sig,
3384 const struct sigaction __user *, act,
3385 struct sigaction __user *, oact,
3386 size_t, sigsetsize)
3387{
3388 struct k_sigaction new_sa, old_sa;
3389 int ret = -EINVAL;
3390
3391
3392 if (sigsetsize != sizeof(sigset_t))
3393 goto out;
3394
3395 if (act) {
3396 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3397 return -EFAULT;
3398 }
3399
3400 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3401
3402 if (!ret && oact) {
3403 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3404 return -EFAULT;
3405 }
3406out:
3407 return ret;
3408}
3409#ifdef CONFIG_COMPAT
/*
 * 32-bit compat rt_sigaction(2): translate compat_sigaction (compat
 * pointers and compat sigset) to/from the native k_sigaction around
 * do_sigaction().
 */
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	compat_sigset_t mask;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* Only the native compat sigset size is supported. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		/*
		 * Accumulate user-access results with |; any non-zero
		 * value means some access faulted.
		 */
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
		/* Widen the compat sigset into the native one. */
		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		/* Narrow the old native sigset back to compat layout. */
		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
3455#endif
3456#endif
3457
3458#ifdef CONFIG_OLD_SIGACTION
/*
 * Legacy sigaction(2) using struct old_sigaction: a single-word signal
 * mask and an in-structure sa_restorer.
 */
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		/* One access_ok() check, then unchecked __get_user()s. */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		/* Expand the one-word mask into a full sigset. */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* Only the low mask word fits the old ABI (sig[0]). */
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
3493#endif
3494#ifdef CONFIG_COMPAT_OLD_SIGACTION
/*
 * Compat legacy sigaction(2): like the old sigaction syscall but with
 * compat pointers for the handler and restorer and a compat one-word
 * signal mask.
 */
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		/* One access_ok() check, then unchecked __get_user()s. */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		/* Widen the compat pointers and one-word mask. */
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* Only the low mask word fits the old ABI (sig[0]). */
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
3534#endif
3535
3536#ifdef CONFIG_SGETMASK_SYSCALL
3537
3538
3539
3540
/*
 * Legacy sgetmask(2): return the first word of the blocked-signal mask.
 * Superseded by sigprocmask()/rt_sigprocmask().
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* Plain read of our own mask — no locking needed. */
	return current->blocked.sig[0];
}
3546
3547SYSCALL_DEFINE1(ssetmask, int, newmask)
3548{
3549 int old = current->blocked.sig[0];
3550 sigset_t newset;
3551
3552 siginitset(&newset, newmask);
3553 set_current_blocked(&newset);
3554
3555 return old;
3556}
3557#endif
3558
3559#ifdef __ARCH_WANT_SYS_SIGNAL
3560
3561
3562
3563SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3564{
3565 struct k_sigaction new_sa, old_sa;
3566 int ret;
3567
3568 new_sa.sa.sa_handler = handler;
3569 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3570 sigemptyset(&new_sa.sa.sa_mask);
3571
3572 ret = do_sigaction(sig, &new_sa, &old_sa);
3573
3574 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3575}
3576#endif
3577
3578#ifdef __ARCH_WANT_SYS_PAUSE
3579
3580SYSCALL_DEFINE0(pause)
3581{
3582 while (!signal_pending(current)) {
3583 __set_current_state(TASK_INTERRUPTIBLE);
3584 schedule();
3585 }
3586 return -ERESTARTNOHAND;
3587}
3588
3589#endif
3590
/*
 * Common worker for the sigsuspend() family: atomically install @set
 * as the blocked mask and sleep until a signal is pending.  The
 * original mask is stashed in ->saved_sigmask and set_restore_sigmask()
 * arranges for it to be restored on the way out of signal delivery.
 * Always returns -ERESTARTNOHAND.
 */
static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
3603
3604
3605
3606
3607
3608
3609
3610SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3611{
3612 sigset_t newset;
3613
3614
3615 if (sigsetsize != sizeof(sigset_t))
3616 return -EINVAL;
3617
3618 if (copy_from_user(&newset, unewset, sizeof(newset)))
3619 return -EFAULT;
3620 return sigsuspend(&newset);
3621}
3622
3623#ifdef CONFIG_COMPAT
/*
 * Compat rt_sigsuspend(2).  On big-endian the compat sigset word order
 * differs from native, so it is converted explicitly; on little-endian
 * the buffer is forwarded to the native syscall unchanged — presumably
 * because the layouts coincide there (NOTE(review): confirm for any
 * new architecture).
 */
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t newset;
	compat_sigset_t newset32;

	/* Only the native sigset size is supported. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
3643#endif
3644
3645#ifdef CONFIG_OLD_SIGSUSPEND
3646SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3647{
3648 sigset_t blocked;
3649 siginitset(&blocked, mask);
3650 return sigsuspend(&blocked);
3651}
3652#endif
3653#ifdef CONFIG_OLD_SIGSUSPEND3
3654SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3655{
3656 sigset_t blocked;
3657 siginitset(&blocked, mask);
3658 return sigsuspend(&blocked);
3659}
3660#endif
3661
/*
 * Weak default for arch_vma_name(): no architecture-specific name for
 * any VMA.  Architectures that label special mappings override this.
 */
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
3666
/*
 * One-time boot initialisation of the signal subsystem: sanity-check
 * the arch siginfo layout at compile time and create the kmem cache
 * used for struct sigqueue allocations.
 */
void __init signals_init(void)
{
	/* The arch preamble size must equal the offset of the pad area. */
	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
		!= offsetof(struct siginfo, _sifields._pad));

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
3675
3676#ifdef CONFIG_KGDB_KDB
3677#include <linux/kdb.h>
3678
3679
3680
3681
3682
3683
/*
 * kdb_send_sig_info - send a signal to a process from the kdb prompt
 * @t:    target task
 * @info: siginfo to deliver (info->si_signo selects the signal)
 *
 * NOTE(review): the siglock is trylock'd only to probe that it is
 * free and is released again before send_sig_info() runs, so the
 * check is inherently racy — presumably acceptable in the stopped-
 * machine kdb context; confirm before reusing this pattern.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	/* Bail out rather than deadlock if someone holds the siglock. */
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	/*
	 * First attempt against a non-running task only warns; the
	 * command must be reissued for the same task to actually send.
	 */
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
3714#endif
3715