// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

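/*
 * Return the handler currently installed for @sig in @t's shared
 * sighand table (signal numbers are 1-based, the action array is
 * 0-based).
 */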
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

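/*
 * Decide whether @t ignores @sig, looking only at the installed
 * handler and the task's special properties (global init, unkillable
 * pid-namespace init, kernel thread). @force is set for signals sent
 * from an ancestor namespace or generated by the kernel itself.
 */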
static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

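/*
 * Full "is this signal ignored" test: blocked signals and (almost all)
 * signals sent to a traced task are never treated as ignored, since
 * they must still be queued for later delivery or for the tracer.
 */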
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals unless it
	 * is SIGKILL which can't be reported anyway but can be ignored
	 * by SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

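/*
 * Recompute TIF_SIGPENDING for @t from its private and shared pending
 * sets, its job control state and cgroup freezer state. Only sets the
 * flag; clearing is left to callers that know it is safe.
 */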
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous if there is a pending signal, but we keep the code
 * simple.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

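/*
 * Rate-limited warning printed when a signal is dropped because the
 * sender's RLIMIT_SIGPENDING quota of queued signals is exhausted;
 * only emitted when print_fatal_signals is enabled.
 */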
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared first.  If @task is already dying, this function becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

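/*
 * Called when a newly cloned thread enters an existing thread group:
 * if a group stop is in progress (or the group is already stopped),
 * make the new thread take part in it as well.
 */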
void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts = NULL;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
	switch (sigpending) {
	case 1:
		if (likely(get_ucounts(ucounts)))
			break;
		fallthrough;
	case LONG_MAX:
		/*
		 * we need to decrease the ucount in the userns tree on any
		 * failure to avoid counts leaking.
		 */
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
		rcu_read_unlock();
		return NULL;
	}
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
			put_ucounts(ucounts);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}

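/*
 * Release a sigqueue entry: drop the RLIMIT_SIGPENDING accounting it
 * holds and free it, unless it is a preallocated (timer) queue entry,
 * which is owned and freed by its creator.
 */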
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) {
		put_ucounts(q->ucounts);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

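/*
 * Empty a pending-signal queue: clear the summary bitmap and free every
 * queued sigqueue entry. Caller must hold the appropriate siglock.
 */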
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

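/*
 * Report whether a fatal signal would go unhandled by userspace: true
 * if @tsk is init, or has no handler installed and no ptracer that
 * could intercept the signal.
 */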
bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * required to avoid self-restarting the timer.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

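/*
 * Pull the first queued synchronous (fault-generated) signal off the
 * current task's private pending list, if one is unblocked. Used by
 * get_signal() so that e.g. a pending SIGSEGV is handled before any
 * asynchronous signals.
 */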
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This is the
 * actual signal that is delivered, while the signal action (SIG_IGN,
 * etc) is a side effect of how the signal is reported.
 *
 * Returns true if the signal should actually be delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

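/*
 * Having queued @sig, pick a thread to handle it: prefer the main
 * thread, fall back to round-robin among the group, and if the signal
 * turns out to be fatal, turn it into a group-wide SIGKILL here.
 */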
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * It probably has the least surprising behaviour in this case.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

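/*
 * Core of signal generation: queue @sig with its siginfo on the
 * private or shared pending list of @t (depending on @type), then let
 * complete_signal() pick and wake a thread to handle it. Called with
 * @t->sighand->siglock held.
 */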
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			 enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;

			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

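/*
 * Report whether the siginfo layout for this signal/si_code pair
 * carries valid si_pid and si_uid fields that may need translating
 * between pid and user namespaces.
 */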
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP is special or has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

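/*
 * Take @tsk->sighand->siglock while guarding against the sighand being
 * swapped or freed under us (see the comment in the retry loop below).
 * Returns the locked sighand, or NULL if the task has exited.
 */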
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  As the 32bit address will be encoded in the
 * low 32bits of the pointer.  Those low 32bits will be stored at a
 * higher address than appears in a 32 bit pointer, so userspace will
 * not see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */
int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif

int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo     = SIGTRAP;
	info.si_errno     = 0;
	info.si_code      = TRAP_PERF;
	info.si_addr      = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	return force_sig_info(&info);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

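/*
 * Wake up waiters on a pidfd when the task exits, so that poll() on
 * the pidfd completes.
 */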
static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task. (p->nsproxy != NULL
	 * is not enough to prevent the change.)
	 *
	 * We use the pid namespace of the parent's pid to determine what
	 * numbers to report to the parent.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock releases while we are here, it would clear
	 * JOBCTL_STOP_PENDING which wouldn't participate in the group stop.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		cgroup_enter_frozen();
		preempt_enable_no_resched();
		freezable_schedule();
		cgroup_leave_frozen(true);
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so borrowing
		 * the notification is fine.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

static void ptrace_do_notify(int signr, int exit_code, int why)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}

/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		cgroup_enter_frozen();
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}

/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}

/**
 * do_freezer_trap - handle the freezer jobctl trap
 *
 * Puts the task into frozen state, if only the task is not about to quit.
 * In this case it drops JOBCTL_TRAP_FREEZE.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held,
 * which is always released before returning.
 */
static void do_freezer_trap(void)
	__releases(&current->sighand->siglock)
{
	/*
	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
	 * let's make another loop to give it a chance to be handled.
	 * In any case, we'll return back.
	 */
	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
	     JOBCTL_TRAP_FREEZE) {
		spin_unlock_irq(&current->sighand->siglock);
		return;
	}

	/*
	 * No other traps are pending: clear TIF_SIGPENDING and go to
	 * sleep in the cgroup freezer until woken (e.g. by SIGKILL or
	 * a cgroup thaw).
	 */
	__set_current_state(TASK_INTERRUPTIBLE);
	clear_thread_flag(TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
	cgroup_enter_frozen();
	freezable_schedule();
}

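/*
 * A traced task has dequeued @signr: stop to report it to the tracer,
 * then act on whatever signal (possibly none, possibly a different
 * one) the tracer asked us to deliver on resume.
 */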
static int ptrace_signal(int signr, kernel_siginfo_t *info)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		clear_siginfo(info);
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		send_signal(signr, info, current, PIDTYPE_PID);
		signr = 0;
	}

	return signr;
}

static void hide_si_addr_tag_bits(struct ksignal *ksig)
{
	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_PERF_EVENT:
		ksig->info.si_addr = arch_untagged_si_addr(
			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
		break;
	case SIL_KILL:
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_CHLD:
	case SIL_RT:
	case SIL_SYS:
		break;
	}
}

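/*
 * Main signal-delivery loop, run on the way back to user mode: handle
 * pending job control state, dequeue the next signal, give ptrace a
 * chance to intercept it, and either return a handled signal in @ksig,
 * ignore it, stop the task, or kill the whole group.
 */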
bool get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	/*
	 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
	 * that the arch handlers don't all have to do it. If we get here
	 * without TIF_SIGPENDING, just exit after running signal work.
	 */
	if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
		if (test_thread_flag(TIF_NOTIFY_SIGNAL))
			tracehook_notify_signal();
		if (!task_sigpending(current))
			return false;
	}

	if (unlikely(uprobe_deny_signal()))
		return false;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);

	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	/* Has this task already been marked for death? */
	if (signal_group_exit(signal)) {
		ksig->info.si_signo = signr = SIGKILL;
		sigdelset(&current->pending.signal, SIGKILL);
		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
				&sighand->action[SIGKILL - 1]);
		recalc_sigpending();
		goto fatal;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl &
			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
			if (current->jobctl & JOBCTL_TRAP_MASK) {
				do_jobctl_trap();
				spin_unlock_irq(&sighand->siglock);
			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
				do_freezer_trap();

			goto relock;
		}

		/*
		 * If the task is leaving the frozen state, let's update
		 * cgroup counters and reset the frozen bit.
		 */
		if (unlikely(cgroup_task_frozen(current))) {
			spin_unlock_irq(&sighand->siglock);
			cgroup_leave_frozen(false);
			goto relock;
		}

		/*
		 * Signals generated by the execution of an instruction
		 * need to be delivered before any other pending signals
		 * so that the instruction pointer in the signal stack
		 * frame points to the faulting instruction.
		 */
		signr = dequeue_synchronous_signal(&ksig->info);
		if (!signr)
			signr = dequeue_signal(current, &current->blocked, &ksig->info);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

	fatal:
		spin_unlock_irq(&sighand->siglock);
		if (unlikely(cgroup_task_frozen(current)))
			cgroup_leave_frozen(true);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * PF_IO_WORKER threads will catch and exit on fatal signals
		 * themselves. They have cleanup that must be performed, so
		 * we cannot call do_exit() on their behalf.
		 */
		if (current->flags & PF_IO_WORKER)
			goto out;

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
out:
	ksig->sig = signr;

	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
		hide_si_addr_tag_bits(ksig);

	return ksig->sig > 0;
}
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831static void signal_delivered(struct ksignal *ksig, int stepping)
2832{
2833 sigset_t blocked;
2834
2835
2836
2837
2838
2839 clear_restore_sigmask();
2840
2841 sigorsets(&blocked, ¤t->blocked, &ksig->ka.sa.sa_mask);
2842 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2843 sigaddset(&blocked, ksig->sig);
2844 set_current_blocked(&blocked);
2845 if (current->sas_ss_flags & SS_AUTODISARM)
2846 sas_ss_reset(current);
2847 tracehook_signal_handler(stepping);
2848}
2849
2850void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2851{
2852 if (failed)
2853 force_sigsegv(ksig->sig);
2854 else
2855 signal_delivered(ksig, stepping);
2856}
2857
2858
2859
2860
2861
2862
2863static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2864{
2865 sigset_t retarget;
2866 struct task_struct *t;
2867
2868 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2869 if (sigisemptyset(&retarget))
2870 return;
2871
2872 t = tsk;
2873 while_each_thread(tsk, t) {
2874 if (t->flags & PF_EXITING)
2875 continue;
2876
2877 if (!has_pending_signals(&retarget, &t->blocked))
2878 continue;
2879
2880 sigandsets(&retarget, &retarget, &t->blocked);
2881
2882 if (!task_sigpending(t))
2883 signal_wake_up(t, 0);
2884
2885 if (sigisemptyset(&retarget))
2886 break;
2887 }
2888}

void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!task_sigpending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}

/**
 *  sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

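/*
 * Install a new blocked mask for @tsk.  Must be called with
 * @tsk->sighand->siglock held; shared pending signals that the new mask
 * would block are first retargeted to other threads in the group.
 */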
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}

/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}

void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by other task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}
EXPORT_SYMBOL(sigprocmask);

/*
 * The api helps set app-provided sigmasks.
 *
 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
 *
 * Note that it does set_restore_sigmask() in advance, so it must be always
 * paired with restore_saved_sigmask_unless() before return from syscall.
 */
int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
{
	sigset_t kmask;

	if (!umask)
		return 0;
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;
	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
		return -EFAULT;

	set_restore_sigmask();
	current->saved_sigmask = current->blocked;
	set_current_blocked(&kmask);

	return 0;
}

#ifdef CONFIG_COMPAT
int set_compat_user_sigmask(const compat_sigset_t __user *umask,
			    size_t sigsetsize)
{
	sigset_t kmask;

	if (!umask)
		return 0;
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;
	if (get_compat_sigset(&kmask, umask))
		return -EFAULT;

	set_restore_sigmask();
	current->saved_sigmask = current->blocked;
	set_current_blocked(&kmask);

	return 0;
}
#endif

/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		sigset_t new_set;
		int error;
		if (get_compat_sigset(&new_set, nset))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
}
#endif

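/*
 * Compute the set of signals that are pending (thread-private or
 * group-shared) and currently blocked.  The snapshot of the pending sets
 * is taken under siglock; the blocked mask is this thread's own.
 */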
static void do_sigpending(sigset_t *set)
{
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(set, &current->blocked, set);
}

/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @uset: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sigsetsize))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
	sigset_t set;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	return put_compat_sigset(uset, &set, sigsetsize);
}
#endif

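/*
 * Per-signal table giving the highest valid signal-specific si_code value
 * and the siginfo layout used for that signal's codes.
 */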
static const struct {
	unsigned char limit, layout;
} sig_sicodes[] = {
	[SIGILL]  = { NSIGILL,  SIL_FAULT },
	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
#if defined(SIGEMT)
	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
#endif
	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
};

static bool known_siginfo_layout(unsigned sig, int si_code)
{
	if (si_code == SI_KERNEL)
		return true;
	else if (si_code > SI_USER) {
		if (sig_specific_sicodes(sig)) {
			if (si_code <= sig_sicodes[sig].limit)
				return true;
		}
		else if (si_code <= NSIGPOLL)
			return true;
	}
	else if (si_code >= SI_DETHREAD)
		return true;
	else if (si_code == SI_ASYNCNL)
		return true;
	return false;
}

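/*
 * Map a (signal, si_code) pair to the union member of struct siginfo that
 * carries its payload; SIL_KILL is the fallback layout for plain kill-style
 * signals.
 */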
enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
{
	enum siginfo_layout layout = SIL_KILL;
	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
		    (si_code <= sig_sicodes[sig].limit)) {
			layout = sig_sicodes[sig].layout;
			/* Handle the exceptions */
			if ((sig == SIGBUS) &&
			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
				layout = SIL_FAULT_MCEERR;
			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
				layout = SIL_FAULT_BNDERR;
#ifdef SEGV_PKUERR
			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
				layout = SIL_FAULT_PKUERR;
#endif
			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
				layout = SIL_PERF_EVENT;
#ifdef __ARCH_SI_TRAPNO
			else if (layout == SIL_FAULT)
				layout = SIL_FAULT_TRAPNO;
#endif
		}
		else if (si_code <= NSIGPOLL)
			layout = SIL_POLL;
	} else {
		if (si_code == SI_TIMER)
			layout = SIL_TIMER;
		else if (si_code == SI_SIGIO)
			layout = SIL_POLL;
		else if (si_code < 0)
			layout = SIL_RT;
	}
	return layout;
}

static inline char __user *si_expansion(const siginfo_t __user *info)
{
	return ((char __user *)info) + sizeof(struct kernel_siginfo);
}

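/*
 * Copy a kernel siginfo to user space, zeroing the expansion bytes beyond
 * sizeof(struct kernel_siginfo) so that the full user-visible siginfo_t is
 * well defined.
 */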
int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
{
	char __user *expansion = si_expansion(to);
	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	if (clear_user(expansion, SI_EXPANSION_SIZE))
		return -EFAULT;
	return 0;
}

static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
				       const siginfo_t __user *from)
{
	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
		char __user *expansion = si_expansion(from);
		char buf[SI_EXPANSION_SIZE];
		int i;
		/*
		 * An unknown si_code might need more than
		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
		 * will return this data to userspace exactly.
		 */
		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
			return -EFAULT;
		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
			if (buf[i] != 0)
				return -E2BIG;
		}
	}
	return 0;
}

static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
				    const siginfo_t __user *from)
{
	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	to->si_signo = signo;
	return post_copy_siginfo_from_user(to, from);
}

int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
{
	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	return post_copy_siginfo_from_user(to, from);
}

#ifdef CONFIG_COMPAT
/**
 * copy_siginfo_to_external32 - flatten a kernel siginfo into the compat
 *				(32-bit) siginfo layout
 * @to: compat siginfo destination
 * @from: kernel siginfo source
 *
 * Fields are copied according to siginfo_layout(); anything not covered
 * by the layout is left zeroed by the initial memset().
 */
void copy_siginfo_to_external32(struct compat_siginfo *to,
		const struct kernel_siginfo *from)
{
	memset(to, 0, sizeof(*to));

	to->si_signo = from->si_signo;
	to->si_errno = from->si_errno;
	to->si_code  = from->si_code;
	switch (siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		break;
	case SIL_TIMER:
		to->si_tid = from->si_tid;
		to->si_overrun = from->si_overrun;
		to->si_int = from->si_int;
		break;
	case SIL_POLL:
		to->si_band = from->si_band;
		to->si_fd = from->si_fd;
		break;
	case SIL_FAULT:
		to->si_addr = ptr_to_compat(from->si_addr);
		break;
	case SIL_FAULT_TRAPNO:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_trapno = from->si_trapno;
		break;
	case SIL_FAULT_MCEERR:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_lower = ptr_to_compat(from->si_lower);
		to->si_upper = ptr_to_compat(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_pkey = from->si_pkey;
		break;
	case SIL_PERF_EVENT:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_perf_data = from->si_perf_data;
		to->si_perf_type = from->si_perf_type;
		break;
	case SIL_CHLD:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_status = from->si_status;
		to->si_utime = from->si_utime;
		to->si_stime = from->si_stime;
		break;
	case SIL_RT:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_int = from->si_int;
		break;
	case SIL_SYS:
		to->si_call_addr = ptr_to_compat(from->si_call_addr);
		to->si_syscall   = from->si_syscall;
		to->si_arch      = from->si_arch;
		break;
	}
}

int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
			     const struct kernel_siginfo *from)
{
	struct compat_siginfo new;

	copy_siginfo_to_external32(&new, from);
	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
		return -EFAULT;
	return 0;
}

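/*
 * Expand a compat (32-bit) siginfo into the kernel's native layout, again
 * driven by siginfo_layout().  The x32 SIGCHLD variant carries 64-bit
 * utime/stime fields and is detected via in_x32_syscall().
 */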
static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
					 const struct compat_siginfo *from)
{
	clear_siginfo(to);
	to->si_signo = from->si_signo;
	to->si_errno = from->si_errno;
	to->si_code  = from->si_code;
	switch (siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		break;
	case SIL_TIMER:
		to->si_tid = from->si_tid;
		to->si_overrun = from->si_overrun;
		to->si_int = from->si_int;
		break;
	case SIL_POLL:
		to->si_band = from->si_band;
		to->si_fd = from->si_fd;
		break;
	case SIL_FAULT:
		to->si_addr = compat_ptr(from->si_addr);
		break;
	case SIL_FAULT_TRAPNO:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_trapno = from->si_trapno;
		break;
	case SIL_FAULT_MCEERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_lower = compat_ptr(from->si_lower);
		to->si_upper = compat_ptr(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_pkey = from->si_pkey;
		break;
	case SIL_PERF_EVENT:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_perf_data = from->si_perf_data;
		to->si_perf_type = from->si_perf_type;
		break;
	case SIL_CHLD:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_status = from->si_status;
#ifdef CONFIG_X86_X32_ABI
		if (in_x32_syscall()) {
			to->si_utime = from->_sifields._sigchld_x32._utime;
			to->si_stime = from->_sifields._sigchld_x32._stime;
		} else
#endif
		{
			to->si_utime = from->si_utime;
			to->si_stime = from->si_stime;
		}
		break;
	case SIL_RT:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_int = from->si_int;
		break;
	case SIL_SYS:
		to->si_call_addr = compat_ptr(from->si_call_addr);
		to->si_syscall   = from->si_syscall;
		to->si_arch      = from->si_arch;
		break;
	}
	return 0;
}

static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
				      const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	from.si_signo = signo;
	return post_copy_siginfo_from_user32(to, &from);
}

int copy_siginfo_from_user32(struct kernel_siginfo *to,
			     const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	return post_copy_siginfo_from_user32(to, &from);
}
#endif /* CONFIG_COMPAT */

/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: locations to store siginfo
 *  @ts: upper bound on process time suspension
 */
static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
			   const struct timespec64 *ts)
{
	ktime_t *to = NULL, timeout = KTIME_MAX;
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	if (ts) {
		if (!timespec64_valid(ts))
			return -EINVAL;
		timeout = timespec64_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * while we are sleeping in so that we'll be awakened when
		 * they arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return ret ? -EINTR : -EAGAIN;
}

/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct __kernel_timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct old_timespec32 __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif
#endif

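/*
 * Fill in a kill()-style siginfo: SI_USER origin, stamped with the
 * sender's tgid and uid as seen in the current pid and user namespaces.
 */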
static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
{
	clear_siginfo(info);
	info->si_signo = sig;
	info->si_errno = 0;
	info->si_code = SI_USER;
	info->si_pid = task_tgid_vnr(current);
	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
}

/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct kernel_siginfo info;

	prepare_kill_siginfo(sig, &info);

	return kill_something_info(sig, &info, pid);
}

/*
 * Verify that the signaler and signalee either are in the same pid namespace
 * or that the signaler's pid namespace is an ancestor of the signalee's pid
 * namespace.
 */
static bool access_pidfd_pidns(struct pid *pid)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *p = ns_of_pid(pid);

	for (;;) {
		if (!p)
			return false;
		if (p == active)
			break;
		p = p->parent;
	}

	return true;
}

static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
		siginfo_t __user *info)
{
#ifdef CONFIG_COMPAT
	/*
	 * Avoid hooking up compat syscalls and instead handle necessary
	 * conversions here. Note, this is a stop-gap measure and should not be
	 * considered a generic solution.
	 */
	if (in_compat_syscall())
		return copy_siginfo_from_user32(
			kinfo, (struct compat_siginfo __user *)info);
#endif
	return copy_siginfo_from_user(kinfo, info);
}

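/*
 * Resolve the struct pid behind a file descriptor: try a proper pidfd via
 * pidfd_pid() first, then fall back to tgid_pidfd_to_pid(), which handles
 * /proc/<pid> directory fds.
 */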
static struct pid *pidfd_to_pid(const struct file *file)
{
	struct pid *pid;

	pid = pidfd_pid(file);
	if (!IS_ERR(pid))
		return pid;

	return tgid_pidfd_to_pid(file);
}

/**
 * sys_pidfd_send_signal - Signal a process through a pidfd
 * @pidfd:  file descriptor of the process
 * @sig:    signal to send
 * @info:   signal info
 * @flags:  future flags
 *
 * The syscall currently only signals via PIDTYPE_PID which covers
 * kill(<positive-pid>, <signal>). It does not signal threads or process
 * groups.
 * In order to extend the syscall to threads and process groups the @flags
 * argument should be used. In essence, the @flags argument will determine
 * what is signaled and not the file descriptor itself. Put in other words,
 * grouping is a property of the flags argument not a property of the file
 * descriptor.
 *
 * Return: 0 on success, negative errno on failure
 */
SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
		siginfo_t __user *, info, unsigned int, flags)
{
	int ret;
	struct fd f;
	struct pid *pid;
	kernel_siginfo_t kinfo;

	/* Enforce flags be set to 0 until we add an extension. */
	if (flags)
		return -EINVAL;

	f = fdget(pidfd);
	if (!f.file)
		return -EBADF;

	/* Is this a pidfd? */
	pid = pidfd_to_pid(f.file);
	if (IS_ERR(pid)) {
		ret = PTR_ERR(pid);
		goto err;
	}

	ret = -EINVAL;
	if (!access_pidfd_pidns(pid))
		goto err;

	if (info) {
		ret = copy_siginfo_from_user_any(&kinfo, info);
		if (unlikely(ret))
			goto err;

		ret = -EINVAL;
		if (unlikely(sig != kinfo.si_signo))
			goto err;

		/* Only allow sending arbitrary signals to yourself. */
		ret = -EPERM;
		if ((task_pid(current) != pid) &&
		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
			goto err;
	} else {
		prepare_kill_siginfo(sig, &kinfo);
	}

	ret = kill_pid_info(sig, &kinfo, pid);

err:
	fdput(f);
	return ret;
}

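/*
 * Send @sig to the single task @pid, checking that it belongs to thread
 * group @tgid when tgid > 0.  With @sig == 0 this degenerates into a
 * permission and existence probe.
 */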
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

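/*
 * Common backend for tkill() and tgkill(): build an SI_TKILL siginfo
 * stamped with the sender's credentials and hand it to do_send_specific().
 */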
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but it's not belonging to the target process anymore. This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, info, pid);
}

/**
 *  sys_rt_sigqueueinfo - send signal information to a signal
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif

static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif

/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);

void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	/*
	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
	 * e.g. by having an architecture use the bit in their uapi.
	 */
	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);

	/*
	 * Clear unknown flag bits in order to allow userspace to detect missing
	 * support for flag bits and to allow the kernel to use non-uapi bits
	 * internally.
	 */
	if (act)
		act->sa.sa_flags &= UAPI_SA_FLAGS;
	if (oact)
		oact->sa.sa_flags &= UAPI_SA_FLAGS;

	sigaction_compat_abi(act, oact);

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}

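/*
 * Common sigaltstack() backend.  @sp is the current user stack pointer,
 * used both to report SS_ONSTACK and to refuse changes while the task is
 * running on the alternate stack; @min_ss_size is the architecture's
 * MINSIGSTKSZ (or the compat equivalent).
 */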
static int
do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
	       size_t min_ss_size)
{
	struct task_struct *t = current;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				return -ENOMEM;
		}

		t->sas_ss_sp = (unsigned long) ss_sp;
		t->sas_ss_size = ss_size;
		t->sas_ss_flags = ss_flags;
	}
	return 0;
}

SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
{
	stack_t new, old;
	int err;
	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			     current_user_stack_pointer(),
			     MINSIGSTKSZ);
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}

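/*
 * Used by sigreturn paths to reinstall the alternate stack recorded in the
 * signal frame.  Errors other than the initial -EFAULT are deliberately
 * ignored: a bogus saved stack must not make sigreturn itself fail.
 */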
int restore_altstack(const stack_t __user *uss)
{
	stack_t new;
	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
	/* squash all but EFAULT for now */
	return 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	return err;
}

#ifdef CONFIG_COMPAT
static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
				 compat_stack_t __user *uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	if (ret >= 0 && uoss_ptr) {
		compat_stack_t old;
		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}

COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = do_compat_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	return err;
}
#endif

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @uset: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
	sigset_t set;

	if (sizeof(old_sigset_t) > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;

	do_sigpending(&set);

	return put_user(set.sig[0], set32);
}
#endif

#endif /* __ARCH_WANT_SYS_SIGPENDING */

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */
SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal to be sent
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
		return -EFAULT;

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
	if (ret)
		return ret;

	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
		return -EFAULT;

	return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

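/*
 * Common rt_sigsuspend() backend: atomically install @set as the blocked
 * mask and sleep until a signal is delivered.  The original mask is saved
 * in ->saved_sigmask and restored on the way back to user space via
 * set_restore_sigmask().
 */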
static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 *  sys_rt_sigsuspend - replace the signal mask for a value with the
 *	@unewset value until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets of the fixed offset siginfo members */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_trapno);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);
	CHECK_OFFSET(si_perf_data);
	CHECK_OFFSET(si_perf_type);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET

	/* usb asyncio */
	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
		     offsetof(struct siginfo, si_addr));
	if (sizeof(int) == sizeof(void __user *)) {
		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
			     sizeof(void __user *));
	} else {
		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
			      sizeof_field(struct siginfo, si_uid)) !=
			     sizeof(void __user *));
		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
			     offsetof(struct siginfo, si_uid));
	}
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
		     offsetof(struct compat_siginfo, si_addr));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof(compat_uptr_t));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof_field(struct siginfo, si_pid));
#endif
}

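/*
 * Called once at boot: run the siginfo layout assertions and create the
 * slab cache used for queued signals (SLAB_PANIC: boot fails if the cache
 * cannot be created).
 */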
void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (!task_is_running(t) && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */