// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1 by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

14#include <linux/slab.h>
15#include <linux/export.h>
16#include <linux/init.h>
17#include <linux/sched/mm.h>
18#include <linux/sched/user.h>
19#include <linux/sched/debug.h>
20#include <linux/sched/task.h>
21#include <linux/sched/task_stack.h>
22#include <linux/sched/cputime.h>
23#include <linux/file.h>
24#include <linux/fs.h>
25#include <linux/proc_fs.h>
26#include <linux/tty.h>
27#include <linux/binfmts.h>
28#include <linux/coredump.h>
29#include <linux/security.h>
30#include <linux/syscalls.h>
31#include <linux/ptrace.h>
32#include <linux/signal.h>
33#include <linux/signalfd.h>
34#include <linux/ratelimit.h>
35#include <linux/tracehook.h>
36#include <linux/capability.h>
37#include <linux/freezer.h>
38#include <linux/pid_namespace.h>
39#include <linux/nsproxy.h>
40#include <linux/user_namespace.h>
41#include <linux/uprobes.h>
42#include <linux/compat.h>
43#include <linux/cn_proc.h>
44#include <linux/compiler.h>
45#include <linux/posix-timers.h>
46#include <linux/livepatch.h>
47#include <linux/cgroup.h>
48
49#define CREATE_TRACE_POINTS
50#include <trace/events/signal.h>
51
52#include <asm/param.h>
53#include <linux/uaccess.h>
54#include <asm/unistd.h>
55#include <asm/siginfo.h>
56#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
58
/*
 * SLAB caches for signal bits.
 */

63static struct kmem_cache *sigqueue_cachep;
64
65int print_fatal_signals __read_mostly;
66
67static void __user *sig_handler(struct task_struct *t, int sig)
68{
69 return t->sighand->action[sig - 1].sa.sa_handler;
70}
71
72static inline bool sig_handler_ignored(void __user *handler, int sig)
73{
	/* Is it explicitly or implicitly ignored? */
75 return handler == SIG_IGN ||
76 (handler == SIG_DFL && sig_kernel_ignore(sig));
77}
78
79static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
80{
81 void __user *handler;
82
83 handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
86 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
87 return true;
88
89 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
90 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
91 return true;
92
93 return sig_handler_ignored(handler, sig);
94}
95
96static bool sig_ignored(struct task_struct *t, int sig, bool force)
97{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
103 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
104 return false;
105
	/*
	 * Tracers may want to know about even ignored signals, except
	 * for SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
111 if (t->ptrace && sig != SIGKILL)
112 return false;
113
114 return sig_task_ignored(t, sig, force);
115}
116
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
121static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
122{
123 unsigned long ready;
124 long i;
125
126 switch (_NSIG_WORDS) {
127 default:
128 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
129 ready |= signal->sig[i] &~ blocked->sig[i];
130 break;
131
132 case 4: ready = signal->sig[3] &~ blocked->sig[3];
133 ready |= signal->sig[2] &~ blocked->sig[2];
134 ready |= signal->sig[1] &~ blocked->sig[1];
135 ready |= signal->sig[0] &~ blocked->sig[0];
136 break;
137
138 case 2: ready = signal->sig[1] &~ blocked->sig[1];
139 ready |= signal->sig[0] &~ blocked->sig[0];
140 break;
141
142 case 1: ready = signal->sig[0] &~ blocked->sig[0];
143 }
144 return ready != 0;
145}
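
/*
 * Added commentary: the PENDING() macro below applies this word-wise
 * "pending & ~blocked" test to a struct sigpending; recalc_sigpending_tsk()
 * evaluates it for both the per-thread and the shared queue.
 */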
146
147#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
148
149static bool recalc_sigpending_tsk(struct task_struct *t)
150{
151 if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
152 PENDING(&t->pending, &t->blocked) ||
153 PENDING(&t->signal->shared_pending, &t->blocked) ||
154 cgroup_task_frozen(t)) {
155 set_tsk_thread_flag(t, TIF_SIGPENDING);
156 return true;
157 }
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should
	 * do it themselves.
	 */
164 return false;
165}
166
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous if there is a pending signal, but we keep the code
 * simple.
 */
171void recalc_sigpending_and_wake(struct task_struct *t)
172{
173 if (recalc_sigpending_tsk(t))
174 signal_wake_up(t, 0);
175}
176
177void recalc_sigpending(void)
178{
179 if (!recalc_sigpending_tsk(current) && !freezing(current) &&
180 !klp_patch_pending(current))
181 clear_thread_flag(TIF_SIGPENDING);
182
183}
184EXPORT_SYMBOL(recalc_sigpending);
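
/*
 * Illustrative sketch (not part of the original file): kernel code should
 * not edit current->blocked directly; set_current_blocked() (defined later
 * in this file) performs the mask update and the recalc_sigpending() shown
 * above under siglock.  The helper name is hypothetical.
 */
static void __maybe_unused example_block_sigusr1(void)
{
	sigset_t newset = current->blocked;

	sigaddset(&newset, SIGUSR1);
	set_current_blocked(&newset);
}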
185
186void calculate_sigpending(void)
187{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
195}
196
/* Given the mask, find the first available signal that should be serviced. */

199#define SYNCHRONOUS_MASK \
200 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
201 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
202
203int next_signal(struct sigpending *pending, sigset_t *mask)
204{
205 unsigned long i, *s, *m, x;
206 int sig = 0;
207
208 s = pending->signal.sig;
209 m = mask->sig;
210
	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
215 x = *s &~ *m;
216 if (x) {
217 if (x & SYNCHRONOUS_MASK)
218 x &= SYNCHRONOUS_MASK;
219 sig = ffz(~x) + 1;
220 return sig;
221 }
222
223 switch (_NSIG_WORDS) {
224 default:
225 for (i = 1; i < _NSIG_WORDS; ++i) {
226 x = *++s &~ *++m;
227 if (!x)
228 continue;
229 sig = ffz(~x) + i*_NSIG_BPW + 1;
230 break;
231 }
232 break;
233
234 case 2:
235 x = s[1] &~ m[1];
236 if (!x)
237 break;
238 sig = ffz(~x) + _NSIG_BPW + 1;
239 break;
240
241 case 1:
		/* Nothing to do */
243 break;
244 }
245
246 return sig;
247}
248
249static inline void print_dropped_signal(int sig)
250{
251 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
252
253 if (!print_fatal_signals)
254 return;
255
256 if (!__ratelimit(&ratelimit_state))
257 return;
258
259 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
260 current->comm, current->pid, sig);
261}
262
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, any existing signo is
 * cleared first.  If @task is already being killed or exiting, this
 * function becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
280bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
281{
282 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
283 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
284 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
285
286 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
287 return false;
288
289 if (mask & JOBCTL_STOP_SIGMASK)
290 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
291
292 task->jobctl |= mask;
293 return true;
294}
295
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If %JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking; @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
308void task_clear_jobctl_trapping(struct task_struct *task)
309{
310 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
311 task->jobctl &= ~JOBCTL_TRAPPING;
312 smp_mb();
313 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
314 }
315}
316
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
332void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
333{
334 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
335
336 if (mask & JOBCTL_STOP_PENDING)
337 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
338
339 task->jobctl &= ~mask;
340
341 if (!(task->jobctl & JOBCTL_PENDING_MASK))
342 task_clear_jobctl_trapping(task);
343}
344
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
361static bool task_participate_group_stop(struct task_struct *task)
362{
363 struct signal_struct *sig = task->signal;
364 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
365
366 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
367
368 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
369
370 if (!consume)
371 return false;
372
373 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
374 sig->group_stop_count--;
375
	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
380 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
381 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
382 return true;
383 }
384 return false;
385}
386
387void task_join_group_stop(struct task_struct *task)
388{
	/* Have the new thread join an on-going signal group stop */
390 unsigned long jobctl = current->jobctl;
391 if (jobctl & JOBCTL_STOP_PENDING) {
392 struct signal_struct *sig = current->signal;
393 unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
394 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
395 if (task_set_jobctl_pending(task, signr | gstop)) {
396 sig->group_stop_count++;
397 }
398 }
399}
400
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
406static struct sigqueue *
407__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
408{
409 struct sigqueue *q = NULL;
410 struct user_struct *user;
411
	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
416 rcu_read_lock();
417 user = get_uid(__task_cred(t)->user);
418 atomic_inc(&user->sigpending);
419 rcu_read_unlock();
420
421 if (override_rlimit ||
422 atomic_read(&user->sigpending) <=
423 task_rlimit(t, RLIMIT_SIGPENDING)) {
424 q = kmem_cache_alloc(sigqueue_cachep, flags);
425 } else {
426 print_dropped_signal(sig);
427 }
428
429 if (unlikely(q == NULL)) {
430 atomic_dec(&user->sigpending);
431 free_uid(user);
432 } else {
433 INIT_LIST_HEAD(&q->list);
434 q->flags = 0;
435 q->user = user;
436 }
437
438 return q;
439}
440
441static void __sigqueue_free(struct sigqueue *q)
442{
443 if (q->flags & SIGQUEUE_PREALLOC)
444 return;
445 atomic_dec(&q->user->sigpending);
446 free_uid(q->user);
447 kmem_cache_free(sigqueue_cachep, q);
448}
449
450void flush_sigqueue(struct sigpending *queue)
451{
452 struct sigqueue *q;
453
454 sigemptyset(&queue->signal);
455 while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
457 list_del_init(&q->list);
458 __sigqueue_free(q);
459 }
460}
461
/*
 * Flush all pending signals for this kthread.
 */
465void flush_signals(struct task_struct *t)
466{
467 unsigned long flags;
468
469 spin_lock_irqsave(&t->sighand->siglock, flags);
470 clear_tsk_thread_flag(t, TIF_SIGPENDING);
471 flush_sigqueue(&t->pending);
472 flush_sigqueue(&t->signal->shared_pending);
473 spin_unlock_irqrestore(&t->sighand->siglock, flags);
474}
475EXPORT_SYMBOL(flush_signals);
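
/*
 * Illustrative sketch (not part of the original file): a kernel thread that
 * opted in to signals via allow_signal() typically drains them like this.
 * The helper name is hypothetical.
 */
static void __maybe_unused example_drain_pending(void)
{
	if (signal_pending(current))
		flush_signals(current);
}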
476
477#ifdef CONFIG_POSIX_TIMERS
478static void __flush_itimer_signals(struct sigpending *pending)
479{
480 sigset_t signal, retain;
481 struct sigqueue *q, *n;
482
483 signal = pending->signal;
484 sigemptyset(&retain);
485
486 list_for_each_entry_safe(q, n, &pending->list, list) {
487 int sig = q->info.si_signo;
488
489 if (likely(q->info.si_code != SI_TIMER)) {
490 sigaddset(&retain, sig);
491 } else {
492 sigdelset(&signal, sig);
493 list_del_init(&q->list);
494 __sigqueue_free(q);
495 }
496 }
497
498 sigorsets(&pending->signal, &signal, &retain);
499}
500
501void flush_itimer_signals(void)
502{
503 struct task_struct *tsk = current;
504 unsigned long flags;
505
506 spin_lock_irqsave(&tsk->sighand->siglock, flags);
507 __flush_itimer_signals(&tsk->pending);
508 __flush_itimer_signals(&tsk->signal->shared_pending);
509 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
510}
511#endif
512
513void ignore_signals(struct task_struct *t)
514{
515 int i;
516
517 for (i = 0; i < _NSIG; ++i)
518 t->sighand->action[i].sa.sa_handler = SIG_IGN;
519
520 flush_signals(t);
521}
522
/*
 * Flush all handlers for a task.
 */
527void
528flush_signal_handlers(struct task_struct *t, int force_default)
529{
530 int i;
531 struct k_sigaction *ka = &t->sighand->action[0];
532 for (i = _NSIG ; i != 0 ; i--) {
533 if (force_default || ka->sa.sa_handler != SIG_IGN)
534 ka->sa.sa_handler = SIG_DFL;
535 ka->sa.sa_flags = 0;
536#ifdef __ARCH_HAS_SA_RESTORER
537 ka->sa.sa_restorer = NULL;
538#endif
539 sigemptyset(&ka->sa.sa_mask);
540 ka++;
541 }
542}
543
544bool unhandled_signal(struct task_struct *tsk, int sig)
545{
546 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
547 if (is_global_init(tsk))
548 return true;
549
550 if (handler != SIG_IGN && handler != SIG_DFL)
551 return false;
552
	/* if ptraced, let the tracer determine */
554 return !tsk->ptrace;
555}
556
557static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
558 bool *resched_timer)
559{
560 struct sigqueue *q, *first = NULL;
561
	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
566 list_for_each_entry(q, &list->list, list) {
567 if (q->info.si_signo == sig) {
568 if (first)
569 goto still_pending;
570 first = q;
571 }
572 }
573
574 sigdelset(&list->signal, sig);
575
576 if (first) {
577still_pending:
578 list_del_init(&first->list);
579 copy_siginfo(info, &first->info);
580
581 *resched_timer =
582 (first->flags & SIGQUEUE_PREALLOC) &&
583 (info->si_code == SI_TIMER) &&
584 (info->si_sys_private);
585
586 __sigqueue_free(first);
587 } else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
593 clear_siginfo(info);
594 info->si_signo = sig;
595 info->si_errno = 0;
596 info->si_code = SI_USER;
597 info->si_pid = 0;
598 info->si_uid = 0;
599 }
600}
601
602static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
603 kernel_siginfo_t *info, bool *resched_timer)
604{
605 int sig = next_signal(pending, mask);
606
607 if (sig)
608 collect_signal(sig, pending, info, resched_timer);
609 return sig;
610}
611
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
618int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
619{
620 bool resched_timer = false;
621 int signr;
622
	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
626 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
627 if (!signr) {
628 signr = __dequeue_signal(&tsk->signal->shared_pending,
629 mask, info, &resched_timer);
630#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
644 if (unlikely(signr == SIGALRM)) {
645 struct hrtimer *tmr = &tsk->signal->real_timer;
646
647 if (!hrtimer_is_queued(tmr) &&
648 tsk->signal->it_real_incr != 0) {
649 hrtimer_forward(tmr, tmr->base->get_time(),
650 tsk->signal->it_real_incr);
651 hrtimer_restart(tmr);
652 }
653 }
654#endif
655 }
656
657 recalc_sigpending();
658 if (!signr)
659 return 0;
660
661 if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
674 current->jobctl |= JOBCTL_STOP_DEQUEUED;
675 }
676#ifdef CONFIG_POSIX_TIMERS
677 if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
684 spin_unlock(&tsk->sighand->siglock);
685 posixtimer_rearm(info);
686 spin_lock(&tsk->sighand->siglock);
687
		/* Don't expose the si_sys_private value to userspace */
689 info->si_sys_private = 0;
690 }
691#endif
692 return signr;
693}
694EXPORT_SYMBOL_GPL(dequeue_signal);
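
/*
 * Illustrative sketch (not part of the original file): dequeue_signal()
 * must be called with ->siglock held, as get_signal() does, e.g.:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */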
695
696static int dequeue_synchronous_signal(kernel_siginfo_t *info)
697{
698 struct task_struct *tsk = current;
699 struct sigpending *pending = &tsk->pending;
700 struct sigqueue *q, *sync = NULL;
701
	/*
	 * Might a synchronous signal be in the queue?
	 */
705 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
706 return 0;
707
	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
713 if ((q->info.si_code > SI_USER) &&
714 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
715 sync = q;
716 goto next;
717 }
718 }
719 return 0;
720next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
724 list_for_each_entry_continue(q, &pending->list, list) {
725 if (q->info.si_signo == sync->info.si_signo)
726 goto still_pending;
727 }
728
729 sigdelset(&pending->signal, sync->info.si_signo);
730 recalc_sigpending();
731still_pending:
732 list_del_init(&sync->list);
733 copy_siginfo(info, &sync->info);
734 __sigqueue_free(sync);
735 return info->si_signo;
736}
737
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * (The spin_lock_irq is for the case where we need
 *  to wake up the process)
 */
749void signal_wake_up_state(struct task_struct *t, unsigned int state)
750{
751 set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
759 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
760 kick_process(t);
761}
762
/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
769static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
770{
771 struct sigqueue *q, *n;
772 sigset_t m;
773
774 sigandsets(&m, mask, &s->signal);
775 if (sigisemptyset(&m))
776 return;
777
778 sigandnsets(&s->signal, &s->signal, mask);
779 list_for_each_entry_safe(q, n, &s->list, list) {
780 if (sigismember(mask, q->info.si_signo)) {
781 list_del_init(&q->list);
782 __sigqueue_free(q);
783 }
784 }
785}
786
787static inline int is_si_special(const struct kernel_siginfo *info)
788{
789 return info <= SEND_SIG_PRIV;
790}
791
792static inline bool si_fromuser(const struct kernel_siginfo *info)
793{
794 return info == SEND_SIG_NOINFO ||
795 (!is_si_special(info) && SI_FROMUSER(info));
796}
797
/*
 * called with RCU read lock from check_kill_permission()
 */
801static bool kill_ok_by_cred(struct task_struct *t)
802{
803 const struct cred *cred = current_cred();
804 const struct cred *tcred = __task_cred(t);
805
806 return uid_eq(cred->euid, tcred->suid) ||
807 uid_eq(cred->euid, tcred->uid) ||
808 uid_eq(cred->uid, tcred->suid) ||
809 uid_eq(cred->uid, tcred->uid) ||
810 ns_capable(tcred->user_ns, CAP_KILL);
811}
812
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
817static int check_kill_permission(int sig, struct kernel_siginfo *info,
818 struct task_struct *t)
819{
820 struct pid *sid;
821 int error;
822
823 if (!valid_signal(sig))
824 return -EINVAL;
825
826 if (!si_fromuser(info))
827 return 0;
828
829 error = audit_signal_info(sig, t);
830 if (error)
831 return error;
832
833 if (!same_thread_group(current, t) &&
834 !kill_ok_by_cred(t)) {
835 switch (sig) {
836 case SIGCONT:
837 sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
842 if (!sid || sid == task_session(current))
843 break;
844
845 default:
846 return -EPERM;
847 }
848 }
849
850 return security_task_kill(t, info, sig, NULL);
851}
852
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
870static void ptrace_trap_notify(struct task_struct *t)
871{
872 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
873 assert_spin_locked(&t->sighand->siglock);
874
875 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
876 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
877}
878
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
889static bool prepare_signal(int sig, struct task_struct *p, bool force)
890{
891 struct signal_struct *signal = p->signal;
892 struct task_struct *t;
893 sigset_t flush;
894
895 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
896 if (!(signal->flags & SIGNAL_GROUP_EXIT))
897 return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
901 } else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
905 siginitset(&flush, sigmask(SIGCONT));
906 flush_sigqueue_mask(&flush, &signal->shared_pending);
907 for_each_thread(p, t)
908 flush_sigqueue_mask(&flush, &t->pending);
909 } else if (sig == SIGCONT) {
910 unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
914 siginitset(&flush, SIG_KERNEL_STOP_MASK);
915 flush_sigqueue_mask(&flush, &signal->shared_pending);
916 for_each_thread(p, t) {
917 flush_sigqueue_mask(&flush, &t->pending);
918 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
919 if (likely(!(t->ptrace & PT_SEIZED)))
920 wake_up_state(t, __TASK_STOPPED);
921 else
922 ptrace_trap_notify(t);
923 }
		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
933 why = 0;
934 if (signal->flags & SIGNAL_STOP_STOPPED)
935 why |= SIGNAL_CLD_CONTINUED;
936 else if (signal->group_stop_count)
937 why |= SIGNAL_CLD_STOPPED;
938
939 if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
945 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
946 signal->group_stop_count = 0;
947 signal->group_exit_code = 0;
948 }
949 }
950
951 return !sig_ignored(p, sig, force);
952}
953
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
962static inline bool wants_signal(int sig, struct task_struct *p)
963{
964 if (sigismember(&p->blocked, sig))
965 return false;
966
967 if (p->flags & PF_EXITING)
968 return false;
969
970 if (sig == SIGKILL)
971 return true;
972
973 if (task_is_stopped_or_traced(p))
974 return false;
975
976 return task_curr(p) || !signal_pending(p);
977}
978
979static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
980{
981 struct signal_struct *signal = p->signal;
982 struct task_struct *t;
983
	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Usually the only thread but let's be robust.
	 */
990 if (wants_signal(sig, p))
991 t = p;
992 else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
997 return;
998 else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
1002 t = signal->curr_target;
1003 while (!wants_signal(sig, t)) {
1004 t = next_thread(t);
1005 if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
1011 return;
1012 }
1013 signal->curr_target = t;
1014 }
1015
	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
1020 if (sig_fatal(p, sig) &&
1021 !(signal->flags & SIGNAL_GROUP_EXIT) &&
1022 !sigismember(&t->real_blocked, sig) &&
1023 (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
1027 if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
1034 signal->flags = SIGNAL_GROUP_EXIT;
1035 signal->group_exit_code = sig;
1036 signal->group_stop_count = 0;
1037 t = p;
1038 do {
1039 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1040 sigaddset(&t->pending.signal, SIGKILL);
1041 signal_wake_up(t, 1);
1042 } while_each_thread(p, t);
1043 return;
1044 }
1045 }
1046
	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
1051 signal_wake_up(t, sig == SIGKILL);
1052 return;
1053}
1054
1055static inline bool legacy_queue(struct sigpending *signals, int sig)
1056{
1057 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1058}
1059
1060#ifdef CONFIG_USER_NS
1061static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
1062{
1063 if (current_user_ns() == task_cred_xxx(t, user_ns))
1064 return;
1065
1066 if (SI_FROMKERNEL(info))
1067 return;
1068
1069 rcu_read_lock();
1070 info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1071 make_kuid(current_user_ns(), info->si_uid));
1072 rcu_read_unlock();
1073}
1074#else
1075static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
1076{
1077 return;
1078}
1079#endif
1080
1081static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1082 enum pid_type type, int from_ancestor_ns)
1083{
1084 struct sigpending *pending;
1085 struct sigqueue *q;
1086 int override_rlimit;
1087 int ret = 0, result;
1088
1089 assert_spin_locked(&t->sighand->siglock);
1090
1091 result = TRACE_SIGNAL_IGNORED;
1092 if (!prepare_signal(sig, t,
1093 from_ancestor_ns || (info == SEND_SIG_PRIV)))
1094 goto ret;
1095
1096 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1097
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
1102 result = TRACE_SIGNAL_ALREADY_PENDING;
1103 if (legacy_queue(pending, sig))
1104 goto ret;
1105
1106 result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
1110 if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1111 goto out_set;
1112
	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
1122 if (sig < SIGRTMIN)
1123 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1124 else
1125 override_rlimit = 0;
1126
1127 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
1128 if (q) {
1129 list_add_tail(&q->list, &pending->list);
1130 switch ((unsigned long) info) {
1131 case (unsigned long) SEND_SIG_NOINFO:
1132 clear_siginfo(&q->info);
1133 q->info.si_signo = sig;
1134 q->info.si_errno = 0;
1135 q->info.si_code = SI_USER;
1136 q->info.si_pid = task_tgid_nr_ns(current,
1137 task_active_pid_ns(t));
1138 q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1139 break;
1140 case (unsigned long) SEND_SIG_PRIV:
1141 clear_siginfo(&q->info);
1142 q->info.si_signo = sig;
1143 q->info.si_errno = 0;
1144 q->info.si_code = SI_KERNEL;
1145 q->info.si_pid = 0;
1146 q->info.si_uid = 0;
1147 break;
1148 default:
1149 copy_siginfo(&q->info, info);
1150 if (from_ancestor_ns)
1151 q->info.si_pid = 0;
1152 break;
1153 }
1154
1155 userns_fixup_signal_uid(&q->info, t);
1156
1157 } else if (!is_si_special(info)) {
1158 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
1164 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1165 ret = -EAGAIN;
1166 goto ret;
1167 } else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
1172 result = TRACE_SIGNAL_LOSE_INFO;
1173 }
1174 }
1175
1176out_set:
1177 signalfd_notify(t, sig);
1178 sigaddset(&pending->signal, sig);
1179
	/* Let multiprocess signals appear after on-going forks */
1181 if (type > PIDTYPE_TGID) {
1182 struct multiprocess_signals *delayed;
1183 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1184 sigset_t *signal = &delayed->signal;
1185
1186 if (sig == SIGCONT)
1187 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1188 else if (sig_kernel_stop(sig))
1189 sigdelset(signal, SIGCONT);
1190 sigaddset(signal, sig);
1191 }
1192 }
1193
1194 complete_signal(sig, t, type);
1195ret:
1196 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1197 return ret;
1198}
1199
1200static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1201 enum pid_type type)
1202{
1203 int from_ancestor_ns = 0;
1204
1205#ifdef CONFIG_PID_NS
1206 from_ancestor_ns = si_fromuser(info) &&
1207 !task_pid_nr_ns(current, task_active_pid_ns(t));
1208#endif
1209
1210 return __send_signal(sig, info, t, type, from_ancestor_ns);
1211}
1212
1213static void print_fatal_signal(int signr)
1214{
1215 struct pt_regs *regs = signal_pt_regs();
1216 pr_info("potentially unexpected fatal signal %d.\n", signr);
1217
1218#if defined(__i386__) && !defined(__arch_um__)
1219 pr_info("code at %08lx: ", regs->ip);
1220 {
1221 int i;
1222 for (i = 0; i < 16; i++) {
1223 unsigned char insn;
1224
1225 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1226 break;
1227 pr_cont("%02x ", insn);
1228 }
1229 }
1230 pr_cont("\n");
1231#endif
1232 preempt_disable();
1233 show_regs(regs);
1234 preempt_enable();
1235}
1236
1237static int __init setup_print_fatal_signals(char *str)
1238{
1239 get_option (&str, &print_fatal_signals);
1240
1241 return 1;
1242}
1243
1244__setup("print-fatal-signals=", setup_print_fatal_signals);
1245
1246int
1247__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1248{
1249 return send_signal(sig, info, p, PIDTYPE_TGID);
1250}
1251
1252int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1253 enum pid_type type)
1254{
1255 unsigned long flags;
1256 int ret = -ESRCH;
1257
1258 if (lock_task_sighand(p, &flags)) {
1259 ret = send_signal(sig, info, p, type);
1260 unlock_task_sighand(p, &flags);
1261 }
1262
1263 return ret;
1264}
1265
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
1277int
1278force_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *t)
1279{
1280 unsigned long int flags;
1281 int ret, blocked, ignored;
1282 struct k_sigaction *action;
1283
1284 spin_lock_irqsave(&t->sighand->siglock, flags);
1285 action = &t->sighand->action[sig-1];
1286 ignored = action->sa.sa_handler == SIG_IGN;
1287 blocked = sigismember(&t->blocked, sig);
1288 if (blocked || ignored) {
1289 action->sa.sa_handler = SIG_DFL;
1290 if (blocked) {
1291 sigdelset(&t->blocked, sig);
1292 recalc_sigpending_and_wake(t);
1293 }
1294 }
1295
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
1299 if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1300 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1301 ret = send_signal(sig, info, t, PIDTYPE_PID);
1302 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1303
1304 return ret;
1305}
1306
/*
 * Nuke all other threads in the group.
 */
1310int zap_other_threads(struct task_struct *p)
1311{
1312 struct task_struct *t = p;
1313 int count = 0;
1314
1315 p->signal->group_stop_count = 0;
1316
1317 while_each_thread(p, t) {
1318 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1319 count++;
1320
		/* Don't bother with already dead threads */
1322 if (t->exit_state)
1323 continue;
1324 sigaddset(&t->pending.signal, SIGKILL);
1325 signal_wake_up(t, 1);
1326 }
1327
1328 return count;
1329}
1330
1331struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1332 unsigned long *flags)
1333{
1334 struct sighand_struct *sighand;
1335
1336 rcu_read_lock();
1337 for (;;) {
1338 sighand = rcu_dereference(tsk->sighand);
1339 if (unlikely(sighand == NULL))
1340 break;
1341
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
1353 spin_lock_irqsave(&sighand->siglock, *flags);
1354 if (likely(sighand == tsk->sighand))
1355 break;
1356 spin_unlock_irqrestore(&sighand->siglock, *flags);
1357 }
1358 rcu_read_unlock();
1359
1360 return sighand;
1361}
1362
/*
 * send signal info to all the members of a thread group
 */
1366int group_send_sig_info(int sig, struct kernel_siginfo *info,
1367 struct task_struct *p, enum pid_type type)
1368{
1369 int ret;
1370
1371 rcu_read_lock();
1372 ret = check_kill_permission(sig, info, p);
1373 rcu_read_unlock();
1374
1375 if (!ret && sig)
1376 ret = do_send_sig_info(sig, info, p, type);
1377
1378 return ret;
1379}
1380
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
1386int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1387{
1388 struct task_struct *p = NULL;
1389 int retval, success;
1390
1391 success = 0;
1392 retval = -ESRCH;
1393 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1394 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1395 success |= !err;
1396 retval = err;
1397 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1398 return success ? 0 : retval;
1399}
1400
1401int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1402{
1403 int error = -ESRCH;
1404 struct task_struct *p;
1405
1406 for (;;) {
1407 rcu_read_lock();
1408 p = pid_task(pid, PIDTYPE_PID);
1409 if (p)
1410 error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1411 rcu_read_unlock();
1412 if (likely(!p || error != -ESRCH))
1413 return error;
1414
		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
1420 }
1421}
1422
1423static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1424{
1425 int error;
1426 rcu_read_lock();
1427 error = kill_pid_info(sig, info, find_vpid(pid));
1428 rcu_read_unlock();
1429 return error;
1430}
1431
1432static inline bool kill_as_cred_perm(const struct cred *cred,
1433 struct task_struct *target)
1434{
1435 const struct cred *pcred = __task_cred(target);
1436
1437 return uid_eq(cred->euid, pcred->suid) ||
1438 uid_eq(cred->euid, pcred->uid) ||
1439 uid_eq(cred->uid, pcred->suid) ||
1440 uid_eq(cred->uid, pcred->uid);
1441}
1442
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
1444int kill_pid_info_as_cred(int sig, struct kernel_siginfo *info, struct pid *pid,
1445 const struct cred *cred)
1446{
1447 int ret = -EINVAL;
1448 struct task_struct *p;
1449 unsigned long flags;
1450
1451 if (!valid_signal(sig))
1452 return ret;
1453
1454 rcu_read_lock();
1455 p = pid_task(pid, PIDTYPE_PID);
1456 if (!p) {
1457 ret = -ESRCH;
1458 goto out_unlock;
1459 }
1460 if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1461 ret = -EPERM;
1462 goto out_unlock;
1463 }
1464 ret = security_task_kill(p, info, sig, cred);
1465 if (ret)
1466 goto out_unlock;
1467
1468 if (sig) {
1469 if (lock_task_sighand(p, &flags)) {
1470 ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
1471 unlock_task_sighand(p, &flags);
1472 } else
1473 ret = -ESRCH;
1474 }
1475out_unlock:
1476 rcu_read_unlock();
1477 return ret;
1478}
1479EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1480
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
1488static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1489{
1490 int ret;
1491
1492 if (pid > 0) {
1493 rcu_read_lock();
1494 ret = kill_pid_info(sig, info, find_vpid(pid));
1495 rcu_read_unlock();
1496 return ret;
1497 }
1498
	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
1500 if (pid == INT_MIN)
1501 return -ESRCH;
1502
1503 read_lock(&tasklist_lock);
1504 if (pid != -1) {
1505 ret = __kill_pgrp_info(sig, info,
1506 pid ? find_vpid(-pid) : task_pgrp(current));
1507 } else {
1508 int retval = 0, count = 0;
1509 struct task_struct * p;
1510
1511 for_each_process(p) {
1512 if (task_pid_vnr(p) > 1 &&
1513 !same_thread_group(p, current)) {
1514 int err = group_send_sig_info(sig, info, p,
1515 PIDTYPE_MAX);
1516 ++count;
1517 if (err != -EPERM)
1518 retval = err;
1519 }
1520 }
1521 ret = count ? retval : -ESRCH;
1522 }
1523 read_unlock(&tasklist_lock);
1524
1525 return ret;
1526}
1527
/*
 * These are for backward compatibility with the rest of the kernel source.
 */
1532int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1533{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
1538 if (!valid_signal(sig))
1539 return -EINVAL;
1540
1541 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1542}
1543EXPORT_SYMBOL(send_sig_info);
1544
1545#define __si_special(priv) \
1546 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1547
1548int
1549send_sig(int sig, struct task_struct *p, int priv)
1550{
1551 return send_sig_info(sig, __si_special(priv), p);
1552}
1553EXPORT_SYMBOL(send_sig);
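
/*
 * Illustrative sketch (not part of the original file): drivers commonly use
 * send_sig() to poke a task; priv != 0 marks the signal as kernel-originated
 * (SEND_SIG_PRIV, si_code SI_KERNEL).  The helper name is hypothetical.
 */
static inline int __maybe_unused example_send_hangup(struct task_struct *p)
{
	return send_sig(SIGHUP, p, 1);
}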
1554
1555void force_sig(int sig, struct task_struct *p)
1556{
1557 force_sig_info(sig, SEND_SIG_PRIV, p);
1558}
1559EXPORT_SYMBOL(force_sig);
1560
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
1567void force_sigsegv(int sig, struct task_struct *p)
1568{
1569 if (sig == SIGSEGV) {
1570 unsigned long flags;
1571 spin_lock_irqsave(&p->sighand->siglock, flags);
1572 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1573 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1574 }
1575 force_sig(SIGSEGV, p);
1576}
1577
1578int force_sig_fault(int sig, int code, void __user *addr
1579 ___ARCH_SI_TRAPNO(int trapno)
1580 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1581 , struct task_struct *t)
1582{
1583 struct kernel_siginfo info;
1584
1585 clear_siginfo(&info);
1586 info.si_signo = sig;
1587 info.si_errno = 0;
1588 info.si_code = code;
1589 info.si_addr = addr;
1590#ifdef __ARCH_SI_TRAPNO
1591 info.si_trapno = trapno;
1592#endif
1593#ifdef __ia64__
1594 info.si_imm = imm;
1595 info.si_flags = flags;
1596 info.si_isr = isr;
1597#endif
1598 return force_sig_info(info.si_signo, &info, t);
1599}
1600
1601int send_sig_fault(int sig, int code, void __user *addr
1602 ___ARCH_SI_TRAPNO(int trapno)
1603 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1604 , struct task_struct *t)
1605{
1606 struct kernel_siginfo info;
1607
1608 clear_siginfo(&info);
1609 info.si_signo = sig;
1610 info.si_errno = 0;
1611 info.si_code = code;
1612 info.si_addr = addr;
1613#ifdef __ARCH_SI_TRAPNO
1614 info.si_trapno = trapno;
1615#endif
1616#ifdef __ia64__
1617 info.si_imm = imm;
1618 info.si_flags = flags;
1619 info.si_isr = isr;
1620#endif
1621 return send_sig_info(info.si_signo, &info, t);
1622}
1623
1624int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1625{
1626 struct kernel_siginfo info;
1627
1628 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1629 clear_siginfo(&info);
1630 info.si_signo = SIGBUS;
1631 info.si_errno = 0;
1632 info.si_code = code;
1633 info.si_addr = addr;
1634 info.si_addr_lsb = lsb;
1635 return force_sig_info(info.si_signo, &info, t);
1636}
1637
1638int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1639{
1640 struct kernel_siginfo info;
1641
1642 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1643 clear_siginfo(&info);
1644 info.si_signo = SIGBUS;
1645 info.si_errno = 0;
1646 info.si_code = code;
1647 info.si_addr = addr;
1648 info.si_addr_lsb = lsb;
1649 return send_sig_info(info.si_signo, &info, t);
1650}
1651EXPORT_SYMBOL(send_sig_mceerr);
1652
1653int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1654{
1655 struct kernel_siginfo info;
1656
1657 clear_siginfo(&info);
1658 info.si_signo = SIGSEGV;
1659 info.si_errno = 0;
1660 info.si_code = SEGV_BNDERR;
1661 info.si_addr = addr;
1662 info.si_lower = lower;
1663 info.si_upper = upper;
1664 return force_sig_info(info.si_signo, &info, current);
1665}
1666
1667#ifdef SEGV_PKUERR
1668int force_sig_pkuerr(void __user *addr, u32 pkey)
1669{
1670 struct kernel_siginfo info;
1671
1672 clear_siginfo(&info);
1673 info.si_signo = SIGSEGV;
1674 info.si_errno = 0;
1675 info.si_code = SEGV_PKUERR;
1676 info.si_addr = addr;
1677 info.si_pkey = pkey;
1678 return force_sig_info(info.si_signo, &info, current);
1679}
1680#endif
1681
/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
1685int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1686{
1687 struct kernel_siginfo info;
1688
1689 clear_siginfo(&info);
1690 info.si_signo = SIGTRAP;
1691 info.si_errno = errno;
1692 info.si_code = TRAP_HWBKPT;
1693 info.si_addr = addr;
1694 return force_sig_info(info.si_signo, &info, current);
1695}
1696
1697int kill_pgrp(struct pid *pid, int sig, int priv)
1698{
1699 int ret;
1700
1701 read_lock(&tasklist_lock);
1702 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1703 read_unlock(&tasklist_lock);
1704
1705 return ret;
1706}
1707EXPORT_SYMBOL(kill_pgrp);
1708
1709int kill_pid(struct pid *pid, int sig, int priv)
1710{
1711 return kill_pid_info(sig, __si_special(priv), pid);
1712}
1713EXPORT_SYMBOL(kill_pid);
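
/*
 * Illustrative sketch (not part of the original file): signalling a process
 * by pid number from kernel context, holding a struct pid reference to
 * avoid pid-reuse races (assumes find_get_pid()/put_pid() from
 * <linux/pid.h>; the helper name is hypothetical).
 */
static int __maybe_unused example_kill_by_nr(pid_t nr, int sig)
{
	struct pid *pid = find_get_pid(nr);
	int ret = -ESRCH;

	if (pid) {
		ret = kill_pid(pid, sig, 1);
		put_pid(pid);
	}
	return ret;
}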
1714
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
1724struct sigqueue *sigqueue_alloc(void)
1725{
1726 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1727
1728 if (q)
1729 q->flags |= SIGQUEUE_PREALLOC;
1730
1731 return q;
1732}
1733
1734void sigqueue_free(struct sigqueue *q)
1735{
1736 unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;
1738
1739 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1740
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
1745 spin_lock_irqsave(lock, flags);
1746 q->flags &= ~SIGQUEUE_PREALLOC;
1747
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
1751 if (!list_empty(&q->list))
1752 q = NULL;
1753 spin_unlock_irqrestore(lock, flags);
1754
1755 if (q)
1756 __sigqueue_free(q);
1757}
1758
1759int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1760{
1761 int sig = q->info.si_signo;
1762 struct sigpending *pending;
1763 struct task_struct *t;
1764 unsigned long flags;
1765 int ret, result;
1766
1767 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1768
1769 ret = -1;
1770 rcu_read_lock();
1771 t = pid_task(pid, type);
1772 if (!t || !likely(lock_task_sighand(t, &flags)))
1773 goto ret;
1774
1775 ret = 1;
1776 result = TRACE_SIGNAL_IGNORED;
1777 if (!prepare_signal(sig, t, false))
1778 goto out;
1779
1780 ret = 0;
1781 if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
1786 BUG_ON(q->info.si_code != SI_TIMER);
1787 q->info.si_overrun++;
1788 result = TRACE_SIGNAL_ALREADY_PENDING;
1789 goto out;
1790 }
1791 q->info.si_overrun = 0;
1792
1793 signalfd_notify(t, sig);
1794 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1795 list_add_tail(&q->list, &pending->list);
1796 sigaddset(&pending->signal, sig);
1797 complete_signal(sig, t, type);
1798 result = TRACE_SIGNAL_DELIVERED;
1799out:
1800 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1801 unlock_task_sighand(t, &flags);
1802ret:
1803 rcu_read_unlock();
1804 return ret;
1805}
1806
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
1814bool do_notify_parent(struct task_struct *tsk, int sig)
1815{
1816 struct kernel_siginfo info;
1817 unsigned long flags;
1818 struct sighand_struct *psig;
1819 bool autoreap = false;
1820 u64 utime, stime;
1821
1822 BUG_ON(sig == -1);
1823
	/* do_notify_parent_cldstop should have been called instead.  */
1825 BUG_ON(task_is_stopped_or_traced(tsk));
1826
1827 BUG_ON(!tsk->ptrace &&
1828 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1829
1830 if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
1835 if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1836 sig = SIGCHLD;
1837 }
1838
1839 clear_siginfo(&info);
1840 info.si_signo = sig;
1841 info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
1853 rcu_read_lock();
1854 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1855 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1856 task_uid(tsk));
1857 rcu_read_unlock();
1858
1859 task_cputime(tsk, &utime, &stime);
1860 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1861 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1862
1863 info.si_status = tsk->exit_code & 0x7f;
1864 if (tsk->exit_code & 0x80)
1865 info.si_code = CLD_DUMPED;
1866 else if (tsk->exit_code & 0x7f)
1867 info.si_code = CLD_KILLED;
1868 else {
1869 info.si_code = CLD_EXITED;
1870 info.si_status = tsk->exit_code >> 8;
1871 }
1872
1873 psig = tsk->parent->sighand;
1874 spin_lock_irqsave(&psig->siglock, flags);
1875 if (!tsk->ptrace && sig == SIGCHLD &&
1876 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1877 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
1893 autoreap = true;
1894 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1895 sig = 0;
1896 }
1897 if (valid_signal(sig) && sig)
1898 __group_send_sig_info(sig, &info, tsk->parent);
1899 __wake_up_parent(tsk, tsk->parent);
1900 spin_unlock_irqrestore(&psig->siglock, flags);
1901
1902 return autoreap;
1903}
1904
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
1918static void do_notify_parent_cldstop(struct task_struct *tsk,
1919 bool for_ptracer, int why)
1920{
1921 struct kernel_siginfo info;
1922 unsigned long flags;
1923 struct task_struct *parent;
1924 struct sighand_struct *sighand;
1925 u64 utime, stime;
1926
1927 if (for_ptracer) {
1928 parent = tsk->parent;
1929 } else {
1930 tsk = tsk->group_leader;
1931 parent = tsk->real_parent;
1932 }
1933
1934 clear_siginfo(&info);
1935 info.si_signo = SIGCHLD;
1936 info.si_errno = 0;
1937
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
1940 rcu_read_lock();
1941 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1942 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1943 rcu_read_unlock();
1944
1945 task_cputime(tsk, &utime, &stime);
1946 info.si_utime = nsec_to_clock_t(utime);
1947 info.si_stime = nsec_to_clock_t(stime);
1948
1949 info.si_code = why;
1950 switch (why) {
1951 case CLD_CONTINUED:
1952 info.si_status = SIGCONT;
1953 break;
1954 case CLD_STOPPED:
1955 info.si_status = tsk->signal->group_exit_code & 0x7f;
1956 break;
1957 case CLD_TRAPPED:
1958 info.si_status = tsk->exit_code & 0x7f;
1959 break;
1960 default:
1961 BUG();
1962 }
1963
1964 sighand = parent->sighand;
1965 spin_lock_irqsave(&sighand->siglock, flags);
1966 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1967 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1968 __group_send_sig_info(SIGCHLD, &info, parent);
1969
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
1972 __wake_up_parent(tsk, parent);
1973 spin_unlock_irqrestore(&sighand->siglock, flags);
1974}
1975
1976static inline bool may_ptrace_stop(void)
1977{
1978 if (!likely(current->ptrace))
1979 return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
1993 if (unlikely(current->mm->core_state) &&
1994 unlikely(current->mm == current->parent->mm))
1995 return false;
1996
1997 return true;
1998}
1999
/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
2004static bool sigkill_pending(struct task_struct *tsk)
2005{
2006 return sigismember(&tsk->pending.signal, SIGKILL) ||
2007 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2008}
2009
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
2024{
2025 bool gstop_done = false;
2026
2027 if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
2044 }
2045
2046 set_special_state(TASK_TRACED);
	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
2066 smp_wmb();
2067
2068 current->last_siginfo = info;
2069 current->exit_code = exit_code;
2070
	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping so the stop count is consumed and, if
	 * this trap completes the group stop, the parent gets notified.
	 */
2078 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2079 gstop_done = task_participate_group_stop(current);
2080
	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2082 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2083 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2084 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2085
	/* entering a trap, clear TRAPPING */
2087 task_clear_jobctl_trapping(current);
2088
	spin_unlock_irq(&current->sighand->siglock);
2090 read_lock(&tasklist_lock);
2091 if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
2102 do_notify_parent_cldstop(current, true, why);
2103 if (gstop_done && ptrace_reparented(current))
2104 do_notify_parent_cldstop(current, false, why);
2105
		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
2112 preempt_disable();
2113 read_unlock(&tasklist_lock);
2114 preempt_enable_no_resched();
2115 cgroup_enter_frozen();
2116 freezable_schedule();
2117 cgroup_leave_frozen(true);
2118 } else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
2129 if (gstop_done)
2130 do_notify_parent_cldstop(current, false, why);
2131
		/* tasklist protects us from ptrace_freeze_traced() */
2133 __set_current_state(TASK_RUNNING);
2134 if (clear_code)
2135 current->exit_code = 0;
2136 read_unlock(&tasklist_lock);
2137 }
2138
	/*
	 * We are back.  Retake the siglock before touching last_siginfo,
	 * so that we are sure not to race with a signal-sending path that
	 * may still be examining it.
	 */
	spin_lock_irq(&current->sighand->siglock);
2145 current->last_siginfo = NULL;
2146
	/* LISTENING can be set only during STOP traps, clear it */
2148 current->jobctl &= ~JOBCTL_LISTENING;
2149
	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
2155 recalc_sigpending_tsk(current);
2156}
2157
2158static void ptrace_do_notify(int signr, int exit_code, int why)
2159{
2160 kernel_siginfo_t info;
2161
2162 clear_siginfo(&info);
2163 info.si_signo = signr;
2164 info.si_code = exit_code;
2165 info.si_pid = task_pid_vnr(current);
2166 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2167
	/* Let the debugger run.  */
2169 ptrace_stop(exit_code, why, 1, &info);
2170}
2171
2172void ptrace_notify(int exit_code)
2173{
2174 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2175 if (unlikely(current->task_works))
2176 task_work_run();
2177
	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
2181}
2182
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * places afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
2205static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
2207{
2208 struct signal_struct *sig = current->signal;
2209
2210 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2211 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2212 struct task_struct *t;
2213
		/* signr will be recorded in task->jobctl for retries */
2215 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2216
2217 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2218 unlikely(signal_group_exit(sig)))
2219 return false;
2220
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
2239 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2240 sig->group_exit_code = signr;
2241
2242 sig->group_stop_count = 0;
2243
2244 if (task_set_jobctl_pending(current, signr | gstop))
2245 sig->group_stop_count++;
2246
2247 t = current;
2248 while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
2254 if (!task_is_stopped(t) &&
2255 task_set_jobctl_pending(t, signr | gstop)) {
2256 sig->group_stop_count++;
2257 if (likely(!(t->ptrace & PT_SEIZED)))
2258 signal_wake_up(t, 0);
2259 else
2260 ptrace_trap_notify(t);
2261 }
2262 }
2263 }
2264
2265 if (likely(!current->ptrace)) {
2266 int notify = 0;
2267
		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
2273 if (task_participate_group_stop(current))
2274 notify = CLD_STOPPED;
2275
2276 set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);
2278
		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
2288 if (notify) {
2289 read_lock(&tasklist_lock);
2290 do_notify_parent_cldstop(current, false, notify);
2291 read_unlock(&tasklist_lock);
2292 }
2293
		/* Now we don't run again until woken by SIGCONT or SIGKILL */
2295 cgroup_enter_frozen();
2296 freezable_schedule();
2297 return true;
2298 } else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
2303 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2304 return false;
2305 }
2306}
2307
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
2323static void do_jobctl_trap(void)
2324{
2325 struct signal_struct *signal = current->signal;
2326 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2327
2328 if (current->ptrace & PT_SEIZED) {
2329 if (!signal->group_stop_count &&
2330 !(signal->flags & SIGNAL_STOP_STOPPED))
2331 signr = SIGTRAP;
2332 WARN_ON_ONCE(!signr);
2333 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2334 CLD_STOPPED);
2335 } else {
2336 WARN_ON_ONCE(!signr);
2337 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2338 current->exit_code = 0;
2339 }
2340}
2341
/**
 * do_freezer_trap - handle the freezer jobctl trap
 *
 * Puts the task into frozen state, if only the task is not about to quit.
 * In this case it drops JOBCTL_TRAP_FREEZE.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held,
 * which is always released before returning.
 */
2352static void do_freezer_trap(void)
	__releases(&current->sighand->siglock)
2354{
	/*
	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
	 * let's make another loop to give it a chance to be handled.
	 * In any case, we'll return back.
	 */
2360 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2361 JOBCTL_TRAP_FREEZE) {
		spin_unlock_irq(&current->sighand->siglock);
2363 return;
2364 }
2365
	/*
	 * Now we're sure that there is no pending trap.  Clear
	 * TIF_SIGPENDING and set TASK_INTERRUPTIBLE so that
	 * freezable_schedule() actually sleeps in the frozen state.
	 */
2372 __set_current_state(TASK_INTERRUPTIBLE);
2373 clear_thread_flag(TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
2375 cgroup_enter_frozen();
2376 freezable_schedule();
2377}
2378
2379static int ptrace_signal(int signr, kernel_siginfo_t *info)
2380{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
2390 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2391 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2392
	/* We're back.  Did the debugger cancel the sig?  */
2394 signr = current->exit_code;
2395 if (signr == 0)
2396 return signr;
2397
2398 current->exit_code = 0;
2399
	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
2406 if (signr != info->si_signo) {
2407 clear_siginfo(info);
2408 info->si_signo = signr;
2409 info->si_errno = 0;
2410 info->si_code = SI_USER;
2411 rcu_read_lock();
2412 info->si_pid = task_pid_vnr(current->parent);
2413 info->si_uid = from_kuid_munged(current_user_ns(),
2414 task_uid(current->parent));
2415 rcu_read_unlock();
2416 }
2417
	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
2420 send_signal(signr, info, current, PIDTYPE_PID);
2421 signr = 0;
2422 }
2423
2424 return signr;
2425}
2426
2427bool get_signal(struct ksignal *ksig)
2428{
2429 struct sighand_struct *sighand = current->sighand;
2430 struct signal_struct *signal = current->signal;
2431 int signr;
2432
2433 if (unlikely(current->task_works))
2434 task_work_run();
2435
2436 if (unlikely(uprobe_deny_signal()))
2437 return false;
2438
	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
2444 try_to_freeze();
2445
2446relock:
2447 spin_lock_irq(&sighand->siglock);
2448
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
2453 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2454 int why;
2455
2456 if (signal->flags & SIGNAL_CLD_CONTINUED)
2457 why = CLD_CONTINUED;
2458 else
2459 why = CLD_STOPPED;
2460
2461 signal->flags &= ~SIGNAL_CLD_MASK;
2462
2463 spin_unlock_irq(&sighand->siglock);
2464
		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
2473 read_lock(&tasklist_lock);
2474 do_notify_parent_cldstop(current, false, why);
2475
2476 if (ptrace_reparented(current->group_leader))
2477 do_notify_parent_cldstop(current->group_leader,
2478 true, why);
2479 read_unlock(&tasklist_lock);
2480
2481 goto relock;
2482 }
2483
	/* Has this task already been marked for death? */
2485 if (signal_group_exit(signal)) {
2486 ksig->info.si_signo = signr = SIGKILL;
		sigdelset(&current->pending.signal, SIGKILL);
2488 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2489 &sighand->action[SIGKILL - 1]);
2490 recalc_sigpending();
2491 goto fatal;
2492 }
2493
2494 for (;;) {
2495 struct k_sigaction *ka;
2496
2497 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2498 do_signal_stop(0))
2499 goto relock;
2500
2501 if (unlikely(current->jobctl &
2502 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2503 if (current->jobctl & JOBCTL_TRAP_MASK) {
2504 do_jobctl_trap();
2505 spin_unlock_irq(&sighand->siglock);
2506 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2507 do_freezer_trap();
2508
2509 goto relock;
2510 }
2511
		/*
		 * If the task is leaving the frozen state, let's update
		 * cgroup counters and reset the frozen bit.
		 */
2516 if (unlikely(cgroup_task_frozen(current))) {
2517 spin_unlock_irq(&sighand->siglock);
2518 cgroup_leave_frozen(false);
2519 goto relock;
2520 }
2521
		/*
		 * Signals generated by the execution of an instruction
		 * need to be delivered before any other pending signals
		 * so that the instruction pointer in the signal stack
		 * frame points to the faulting instruction.
		 */
2528 signr = dequeue_synchronous_signal(&ksig->info);
2529 if (!signr)
			signr = dequeue_signal(current, &current->blocked, &ksig->info);
2531
2532 if (!signr)
2533 break;
2534
2535 if (unlikely(current->ptrace) && signr != SIGKILL) {
2536 signr = ptrace_signal(signr, &ksig->info);
2537 if (!signr)
2538 continue;
2539 }
2540
2541 ka = &sighand->action[signr-1];
2542
		/* Trace actually delivered signals. */
2544 trace_signal_deliver(signr, &ksig->info, ka);
2545
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2547 continue;
2548 if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
2550 ksig->ka = *ka;
2551
2552 if (ka->sa.sa_flags & SA_ONESHOT)
2553 ka->sa.sa_handler = SIG_DFL;
2554
2555 break;
2556 }
2557
		/*
		 * Now we are doing the default action for this signal.
		 */
2561 if (sig_kernel_ignore(signr))
2562 continue;
2563
		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
2574 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2575 !sig_kernel_only(signr))
2576 continue;
2577
2578 if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
2589 if (signr != SIGSTOP) {
2590 spin_unlock_irq(&sighand->siglock);
2591
				/* signals can be posted during this window */
2593
2594 if (is_current_pgrp_orphaned())
2595 goto relock;
2596
2597 spin_lock_irq(&sighand->siglock);
2598 }
2599
2600 if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
2602 goto relock;
2603 }
2604
			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
2609 continue;
2610 }
2611
2612 fatal:
2613 spin_unlock_irq(&sighand->siglock);
2614 if (unlikely(cgroup_task_frozen(current)))
2615 cgroup_leave_frozen(true);
2616
		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
2620 current->flags |= PF_SIGNALED;
2621
2622 if (sig_kernel_coredump(signr)) {
2623 if (print_fatal_signals)
2624 print_fatal_signal(ksig->info.si_signo);
2625 proc_coredump_connector(current);
2626
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * them, and thus we can't ever return.  A thread
			 * that failed to dump falls through and dies via
			 * do_group_exit() below.
			 */
2634 do_coredump(&ksig->info);
2635 }
2636
		/*
		 * Death signals, no core dump.
		 */
2640 do_group_exit(ksig->info.si_signo);
2641
2642 }
2643 spin_unlock_irq(&sighand->siglock);
2644
2645 ksig->sig = signr;
2646 return ksig->sig > 0;
2647}
2648
/**
 * signal_delivered - called after signal delivery to update blocked signals
 * @ksig:	kernel signal struct
 * @stepping:	nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
 */
2659static void signal_delivered(struct ksignal *ksig, int stepping)
2660{
2661 sigset_t blocked;
2662
	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn.  So we can
	   simply clear the restore sigmask flag.  */
2667 clear_restore_sigmask();
2668
	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2670 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2671 sigaddset(&blocked, ksig->sig);
2672 set_current_blocked(&blocked);
2673 tracehook_signal_handler(stepping);
2674}
2675
2676void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2677{
2678 if (failed)
2679 force_sigsegv(ksig->sig, current);
2680 else
2681 signal_delivered(ksig, stepping);
2682}
2683
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
2689static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2690{
2691 sigset_t retarget;
2692 struct task_struct *t;
2693
2694 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2695 if (sigisemptyset(&retarget))
2696 return;
2697
2698 t = tsk;
2699 while_each_thread(tsk, t) {
2700 if (t->flags & PF_EXITING)
2701 continue;
2702
2703 if (!has_pending_signals(&retarget, &t->blocked))
2704 continue;
		/* Remove the signals this thread can handle. */
2706 sigandsets(&retarget, &retarget, &t->blocked);
2707
2708 if (!signal_pending(t))
2709 signal_wake_up(t, 0);
2710
2711 if (sigisemptyset(&retarget))
2712 break;
2713 }
2714}
2715
2716void exit_signals(struct task_struct *tsk)
2717{
2718 int group_stop = 0;
2719 sigset_t unblocked;
2720
	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
2725 cgroup_threadgroup_change_begin(tsk);
2726
2727 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2728 tsk->flags |= PF_EXITING;
2729 cgroup_threadgroup_change_end(tsk);
2730 return;
2731 }
2732
2733 spin_lock_irq(&tsk->sighand->siglock);
2734
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
2738 tsk->flags |= PF_EXITING;
2739
2740 cgroup_threadgroup_change_end(tsk);
2741
2742 if (!signal_pending(tsk))
2743 goto out;
2744
2745 unblocked = tsk->blocked;
2746 signotset(&unblocked);
2747 retarget_shared_pending(tsk, &unblocked);
2748
2749 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2750 task_participate_group_stop(tsk))
2751 group_stop = CLD_STOPPED;
2752out:
2753 spin_unlock_irq(&tsk->sighand->siglock);
2754
	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
2759 if (unlikely(group_stop)) {
2760 read_lock(&tasklist_lock);
2761 do_notify_parent_cldstop(tsk, false, group_stop);
2762 read_unlock(&tasklist_lock);
2763 }
2764}
2765
/*
 * System call entry points.
 */

/**
 *  sys_restart_syscall - restart a system call
 */
2773SYSCALL_DEFINE0(restart_syscall)
2774{
	struct restart_block *restart = &current->restart_block;
2776 return restart->fn(restart);
2777}
2778
2779long do_no_restart_syscall(struct restart_block *param)
2780{
2781 return -EINTR;
2782}
2783
2784static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2785{
2786 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2787 sigset_t newblocked;
2788
		sigandnsets(&newblocked, newset, &current->blocked);
2790 retarget_shared_pending(tsk, &newblocked);
2791 }
2792 tsk->blocked = *newset;
2793 recalc_sigpending();
2794}
2795
/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
2803void set_current_blocked(sigset_t *newset)
2804{
2805 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2806 __set_current_blocked(newset);
2807}
2808
2809void __set_current_blocked(const sigset_t *newset)
2810{
2811 struct task_struct *tsk = current;
2812
2813
2814
2815
2816
2817 if (sigequalsets(&tsk->blocked, newset))
2818 return;
2819
2820 spin_lock_irq(&tsk->sighand->siglock);
2821 __set_task_blocked(tsk, newset);
2822 spin_unlock_irq(&tsk->sighand->siglock);
2823}
2824
/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
2833int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2834{
2835 struct task_struct *tsk = current;
2836 sigset_t newset;
2837
	/* Lockless, only current can change ->blocked, never from irq */
2839 if (oldset)
2840 *oldset = tsk->blocked;
2841
2842 switch (how) {
2843 case SIG_BLOCK:
2844 sigorsets(&newset, &tsk->blocked, set);
2845 break;
2846 case SIG_UNBLOCK:
2847 sigandnsets(&newset, &tsk->blocked, set);
2848 break;
2849 case SIG_SETMASK:
2850 newset = *set;
2851 break;
2852 default:
2853 return -EINVAL;
2854 }
2855
2856 __set_current_blocked(&newset);
2857 return 0;
2858}
2859EXPORT_SYMBOL(sigprocmask);
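
/*
 * Illustrative sketch (not part of the original file): blocking all signals
 * from kernel context.  Note that, unlike the user-space syscall, this
 * kernel interface will happily block even SIGKILL (see the comment above
 * sigprocmask()).  The helper name is hypothetical.
 */
static int __maybe_unused example_block_all_signals(sigset_t *saved)
{
	sigset_t all;

	sigfillset(&all);
	return sigprocmask(SIG_BLOCK, &all, saved);
}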
2860
/*
 * The api helps set app-provided sigmasks.
 *
 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
 */
2867int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set,
2868 sigset_t *oldset, size_t sigsetsize)
2869{
2870 if (!usigmask)
2871 return 0;
2872
2873 if (sigsetsize != sizeof(sigset_t))
2874 return -EINVAL;
2875 if (copy_from_user(set, usigmask, sizeof(sigset_t)))
2876 return -EFAULT;
2877
2878 *oldset = current->blocked;
2879 set_current_blocked(set);
2880
2881 return 0;
2882}
2883EXPORT_SYMBOL(set_user_sigmask);
2884
2885#ifdef CONFIG_COMPAT
2886int set_compat_user_sigmask(const compat_sigset_t __user *usigmask,
2887 sigset_t *set, sigset_t *oldset,
2888 size_t sigsetsize)
2889{
2890 if (!usigmask)
2891 return 0;
2892
2893 if (sigsetsize != sizeof(compat_sigset_t))
2894 return -EINVAL;
2895 if (get_compat_sigset(set, usigmask))
2896 return -EFAULT;
2897
2898 *oldset = current->blocked;
2899 set_current_blocked(set);
2900
2901 return 0;
2902}
2903EXPORT_SYMBOL(set_compat_user_sigmask);
2904#endif
2905
/*
 * restore_user_sigmask:
 * usigmask: sigmask passed in from userland.
 * sigsaved: saved sigmask when the syscall started and changed the sigmask to
 *	     usigmask.
 *
 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
 * epoll_pwait where a new sigmask is passed in from userland for the syscalls.
 */
2915void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved,
2916 bool interrupted)
2917{
2918
2919 if (!usigmask)
2920 return;
2921
	/*
	 * When signals are pending, do not restore them here.
	 * Restoring sigmask here can lead to delivering signals that the above
	 * syscalls are intended to block because of the sigmask passed in.
	 */
2926 if (interrupted) {
2927 current->saved_sigmask = *sigsaved;
2928 set_restore_sigmask();
2929 return;
2930 }
2931
	/*
	 * This is needed because the fast syscall return path does not restore
	 * saved_sigmask when signals are not pending.
	 */
2936 set_current_blocked(sigsaved);
2937}
2938EXPORT_SYMBOL(restore_user_sigmask);
2939
/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: the new set of blocked signals, or NULL
 *  @oset: previous value of the signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */
2947SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2948 sigset_t __user *, oset, size_t, sigsetsize)
2949{
2950 sigset_t old_set, new_set;
2951 int error;
2952
	/* XXX: Don't preclude handling different sized sigset_t's.  */
2954 if (sigsetsize != sizeof(sigset_t))
2955 return -EINVAL;
2956
2957 old_set = current->blocked;
2958
2959 if (nset) {
2960 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2961 return -EFAULT;
2962 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2963
2964 error = sigprocmask(how, &new_set, NULL);
2965 if (error)
2966 return error;
2967 }
2968
2969 if (oset) {
2970 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2971 return -EFAULT;
2972 }
2973
2974 return 0;
2975}
2976
2977#ifdef CONFIG_COMPAT
2978COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2979 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2980{
2981 sigset_t old_set = current->blocked;
2982
2983
2984 if (sigsetsize != sizeof(sigset_t))
2985 return -EINVAL;
2986
2987 if (nset) {
2988 sigset_t new_set;
2989 int error;
2990 if (get_compat_sigset(&new_set, nset))
2991 return -EFAULT;
2992 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2993
2994 error = sigprocmask(how, &new_set, NULL);
2995 if (error)
2996 return error;
2997 }
2998 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
2999}
3000#endif
3001
static void do_sigpending(sigset_t *set)
{
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(set, &current->blocked, set);
}
3012
/**
 *  sys_rt_sigpending - examine pending signals that have been raised
 *			while blocked
 *  @uset: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */
3019SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3020{
3021 sigset_t set;
3022
3023 if (sigsetsize > sizeof(*uset))
3024 return -EINVAL;
3025
3026 do_sigpending(&set);
3027
3028 if (copy_to_user(uset, &set, sigsetsize))
3029 return -EFAULT;
3030
3031 return 0;
3032}
3033
3034#ifdef CONFIG_COMPAT
3035COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3036 compat_size_t, sigsetsize)
3037{
3038 sigset_t set;
3039
3040 if (sigsetsize > sizeof(*uset))
3041 return -EINVAL;
3042
3043 do_sigpending(&set);
3044
3045 return put_compat_sigset(uset, &set, sigsetsize);
3046}
3047#endif
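/*
 * Example (userspace sketch): how the rt_sigpending path above is observed.
 * With SIGUSR1 blocked, raise() leaves the signal pending and sigpending()
 * reports it in the returned set.
 *
 *	sigset_t mask, pending;
 *
 *	sigemptyset(&mask);
 *	sigaddset(&mask, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &mask, NULL);
 *	raise(SIGUSR1);				// queued, not delivered
 *	sigpending(&pending);
 *	// sigismember(&pending, SIGUSR1) is now 1
 */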
3048
3049static const struct {
3050 unsigned char limit, layout;
3051} sig_sicodes[] = {
3052 [SIGILL] = { NSIGILL, SIL_FAULT },
3053 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3054 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3055 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3056 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3057#if defined(SIGEMT)
3058 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3059#endif
3060 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3061 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3062 [SIGSYS] = { NSIGSYS, SIL_SYS },
3063};
3064
3065static bool known_siginfo_layout(unsigned sig, int si_code)
3066{
3067 if (si_code == SI_KERNEL)
3068 return true;
	else if (si_code > SI_USER) {
3070 if (sig_specific_sicodes(sig)) {
3071 if (si_code <= sig_sicodes[sig].limit)
3072 return true;
3073 }
3074 else if (si_code <= NSIGPOLL)
3075 return true;
3076 }
3077 else if (si_code >= SI_DETHREAD)
3078 return true;
3079 else if (si_code == SI_ASYNCNL)
3080 return true;
3081 return false;
3082}
3083
3084enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3085{
3086 enum siginfo_layout layout = SIL_KILL;
3087 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3088 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3089 (si_code <= sig_sicodes[sig].limit)) {
3090 layout = sig_sicodes[sig].layout;
3091
3092 if ((sig == SIGBUS) &&
3093 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3094 layout = SIL_FAULT_MCEERR;
3095 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3096 layout = SIL_FAULT_BNDERR;
3097#ifdef SEGV_PKUERR
3098 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3099 layout = SIL_FAULT_PKUERR;
3100#endif
3101 }
3102 else if (si_code <= NSIGPOLL)
3103 layout = SIL_POLL;
3104 } else {
3105 if (si_code == SI_TIMER)
3106 layout = SIL_TIMER;
3107 else if (si_code == SI_SIGIO)
3108 layout = SIL_POLL;
3109 else if (si_code < 0)
3110 layout = SIL_RT;
3111 }
3112 return layout;
3113}
3114
3115static inline char __user *si_expansion(const siginfo_t __user *info)
3116{
3117 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3118}
3119
3120int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3121{
3122 char __user *expansion = si_expansion(to);
	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3124 return -EFAULT;
3125 if (clear_user(expansion, SI_EXPANSION_SIZE))
3126 return -EFAULT;
3127 return 0;
3128}
3129
3130static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3131 const siginfo_t __user *from)
3132{
3133 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3134 char __user *expansion = si_expansion(from);
3135 char buf[SI_EXPANSION_SIZE];
3136 int i;
3137
		/*
		 * An unknown si_code might need more than
		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
		 * will return this data to userspace exactly.
		 */
3143 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3144 return -EFAULT;
3145 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3146 if (buf[i] != 0)
3147 return -E2BIG;
3148 }
3149 }
3150 return 0;
3151}
3152
3153static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3154 const siginfo_t __user *from)
3155{
3156 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3157 return -EFAULT;
3158 to->si_signo = signo;
3159 return post_copy_siginfo_from_user(to, from);
3160}
3161
3162int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3163{
3164 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3165 return -EFAULT;
3166 return post_copy_siginfo_from_user(to, from);
3167}
3168
3169#ifdef CONFIG_COMPAT
3170int copy_siginfo_to_user32(struct compat_siginfo __user *to,
3171 const struct kernel_siginfo *from)
3172#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
3173{
3174 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
3175}
3176int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3177 const struct kernel_siginfo *from, bool x32_ABI)
3178#endif
3179{
3180 struct compat_siginfo new;
3181 memset(&new, 0, sizeof(new));
3182
3183 new.si_signo = from->si_signo;
3184 new.si_errno = from->si_errno;
3185 new.si_code = from->si_code;
3186 switch(siginfo_layout(from->si_signo, from->si_code)) {
3187 case SIL_KILL:
3188 new.si_pid = from->si_pid;
3189 new.si_uid = from->si_uid;
3190 break;
3191 case SIL_TIMER:
3192 new.si_tid = from->si_tid;
3193 new.si_overrun = from->si_overrun;
3194 new.si_int = from->si_int;
3195 break;
3196 case SIL_POLL:
3197 new.si_band = from->si_band;
3198 new.si_fd = from->si_fd;
3199 break;
3200 case SIL_FAULT:
3201 new.si_addr = ptr_to_compat(from->si_addr);
3202#ifdef __ARCH_SI_TRAPNO
3203 new.si_trapno = from->si_trapno;
3204#endif
3205 break;
3206 case SIL_FAULT_MCEERR:
3207 new.si_addr = ptr_to_compat(from->si_addr);
3208#ifdef __ARCH_SI_TRAPNO
3209 new.si_trapno = from->si_trapno;
3210#endif
3211 new.si_addr_lsb = from->si_addr_lsb;
3212 break;
3213 case SIL_FAULT_BNDERR:
3214 new.si_addr = ptr_to_compat(from->si_addr);
3215#ifdef __ARCH_SI_TRAPNO
3216 new.si_trapno = from->si_trapno;
3217#endif
3218 new.si_lower = ptr_to_compat(from->si_lower);
3219 new.si_upper = ptr_to_compat(from->si_upper);
3220 break;
3221 case SIL_FAULT_PKUERR:
3222 new.si_addr = ptr_to_compat(from->si_addr);
3223#ifdef __ARCH_SI_TRAPNO
3224 new.si_trapno = from->si_trapno;
3225#endif
3226 new.si_pkey = from->si_pkey;
3227 break;
3228 case SIL_CHLD:
3229 new.si_pid = from->si_pid;
3230 new.si_uid = from->si_uid;
3231 new.si_status = from->si_status;
3232#ifdef CONFIG_X86_X32_ABI
3233 if (x32_ABI) {
3234 new._sifields._sigchld_x32._utime = from->si_utime;
3235 new._sifields._sigchld_x32._stime = from->si_stime;
3236 } else
3237#endif
3238 {
3239 new.si_utime = from->si_utime;
3240 new.si_stime = from->si_stime;
3241 }
3242 break;
3243 case SIL_RT:
3244 new.si_pid = from->si_pid;
3245 new.si_uid = from->si_uid;
3246 new.si_int = from->si_int;
3247 break;
3248 case SIL_SYS:
3249 new.si_call_addr = ptr_to_compat(from->si_call_addr);
3250 new.si_syscall = from->si_syscall;
3251 new.si_arch = from->si_arch;
3252 break;
3253 }
3254
3255 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3256 return -EFAULT;
3257
3258 return 0;
3259}
3260
3261static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3262 const struct compat_siginfo *from)
3263{
3264 clear_siginfo(to);
3265 to->si_signo = from->si_signo;
3266 to->si_errno = from->si_errno;
3267 to->si_code = from->si_code;
3268 switch(siginfo_layout(from->si_signo, from->si_code)) {
3269 case SIL_KILL:
3270 to->si_pid = from->si_pid;
3271 to->si_uid = from->si_uid;
3272 break;
3273 case SIL_TIMER:
3274 to->si_tid = from->si_tid;
3275 to->si_overrun = from->si_overrun;
3276 to->si_int = from->si_int;
3277 break;
3278 case SIL_POLL:
3279 to->si_band = from->si_band;
3280 to->si_fd = from->si_fd;
3281 break;
3282 case SIL_FAULT:
3283 to->si_addr = compat_ptr(from->si_addr);
3284#ifdef __ARCH_SI_TRAPNO
3285 to->si_trapno = from->si_trapno;
3286#endif
3287 break;
3288 case SIL_FAULT_MCEERR:
3289 to->si_addr = compat_ptr(from->si_addr);
3290#ifdef __ARCH_SI_TRAPNO
3291 to->si_trapno = from->si_trapno;
3292#endif
3293 to->si_addr_lsb = from->si_addr_lsb;
3294 break;
3295 case SIL_FAULT_BNDERR:
3296 to->si_addr = compat_ptr(from->si_addr);
3297#ifdef __ARCH_SI_TRAPNO
3298 to->si_trapno = from->si_trapno;
3299#endif
3300 to->si_lower = compat_ptr(from->si_lower);
3301 to->si_upper = compat_ptr(from->si_upper);
3302 break;
3303 case SIL_FAULT_PKUERR:
3304 to->si_addr = compat_ptr(from->si_addr);
3305#ifdef __ARCH_SI_TRAPNO
3306 to->si_trapno = from->si_trapno;
3307#endif
3308 to->si_pkey = from->si_pkey;
3309 break;
3310 case SIL_CHLD:
3311 to->si_pid = from->si_pid;
3312 to->si_uid = from->si_uid;
3313 to->si_status = from->si_status;
3314#ifdef CONFIG_X86_X32_ABI
3315 if (in_x32_syscall()) {
3316 to->si_utime = from->_sifields._sigchld_x32._utime;
3317 to->si_stime = from->_sifields._sigchld_x32._stime;
3318 } else
3319#endif
3320 {
3321 to->si_utime = from->si_utime;
3322 to->si_stime = from->si_stime;
3323 }
3324 break;
3325 case SIL_RT:
3326 to->si_pid = from->si_pid;
3327 to->si_uid = from->si_uid;
3328 to->si_int = from->si_int;
3329 break;
3330 case SIL_SYS:
3331 to->si_call_addr = compat_ptr(from->si_call_addr);
3332 to->si_syscall = from->si_syscall;
3333 to->si_arch = from->si_arch;
3334 break;
3335 }
3336 return 0;
3337}
3338
3339static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3340 const struct compat_siginfo __user *ufrom)
3341{
3342 struct compat_siginfo from;
3343
3344 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3345 return -EFAULT;
3346
3347 from.si_signo = signo;
3348 return post_copy_siginfo_from_user32(to, &from);
3349}
3350
3351int copy_siginfo_from_user32(struct kernel_siginfo *to,
3352 const struct compat_siginfo __user *ufrom)
3353{
3354 struct compat_siginfo from;
3355
3356 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3357 return -EFAULT;
3358
3359 return post_copy_siginfo_from_user32(to, &from);
3360}
3361#endif
3362
/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 */
3369static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3370 const struct timespec64 *ts)
3371{
3372 ktime_t *to = NULL, timeout = KTIME_MAX;
3373 struct task_struct *tsk = current;
3374 sigset_t mask = *which;
3375 int sig, ret = 0;
3376
3377 if (ts) {
3378 if (!timespec64_valid(ts))
3379 return -EINVAL;
3380 timeout = timespec64_to_ktime(*ts);
3381 to = &timeout;
3382 }
3383
	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
3387 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3388 signotset(&mask);
3389
3390 spin_lock_irq(&tsk->sighand->siglock);
3391 sig = dequeue_signal(tsk, &mask, info);
3392 if (!sig && timeout) {
		/*
		 * None ready; temporarily unblock the signals we're
		 * interested in while we sleep, so that we'll be awakened
		 * when they arrive.  Unblocking is always fine, so we can
		 * avoid set_current_blocked().
		 */
3399 tsk->real_blocked = tsk->blocked;
3400 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3401 recalc_sigpending();
3402 spin_unlock_irq(&tsk->sighand->siglock);
3403
3404 __set_current_state(TASK_INTERRUPTIBLE);
3405 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3406 HRTIMER_MODE_REL);
3407 spin_lock_irq(&tsk->sighand->siglock);
3408 __set_task_blocked(tsk, &tsk->real_blocked);
3409 sigemptyset(&tsk->real_blocked);
3410 sig = dequeue_signal(tsk, &mask, info);
3411 }
3412 spin_unlock_irq(&tsk->sighand->siglock);
3413
3414 if (sig)
3415 return sig;
3416 return ret ? -EINTR : -EAGAIN;
3417}
3418
/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */
3427SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3428 siginfo_t __user *, uinfo,
3429 const struct __kernel_timespec __user *, uts,
3430 size_t, sigsetsize)
3431{
3432 sigset_t these;
3433 struct timespec64 ts;
3434 kernel_siginfo_t info;
3435 int ret;
3436
	/* XXX: Don't preclude handling different sized sigset_t's.  */
3438 if (sigsetsize != sizeof(sigset_t))
3439 return -EINVAL;
3440
3441 if (copy_from_user(&these, uthese, sizeof(these)))
3442 return -EFAULT;
3443
3444 if (uts) {
3445 if (get_timespec64(&ts, uts))
3446 return -EFAULT;
3447 }
3448
3449 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3450
3451 if (ret > 0 && uinfo) {
3452 if (copy_siginfo_to_user(uinfo, &info))
3453 ret = -EFAULT;
3454 }
3455
3456 return ret;
3457}
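/*
 * Example (userspace sketch): synchronous signal handling through the
 * rt_sigtimedwait path above.  The signal must be blocked first, otherwise
 * it may be delivered to a handler instead of being picked up here.
 * sigtimedwait() returns the signal number, or -1 with errno == EAGAIN on
 * timeout.
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) > 0)
 *		printf("child %d changed state\n", info.si_pid);
 */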
3458
3459#ifdef CONFIG_COMPAT_32BIT_TIME
3460SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3461 siginfo_t __user *, uinfo,
3462 const struct old_timespec32 __user *, uts,
3463 size_t, sigsetsize)
3464{
3465 sigset_t these;
3466 struct timespec64 ts;
3467 kernel_siginfo_t info;
3468 int ret;
3469
3470 if (sigsetsize != sizeof(sigset_t))
3471 return -EINVAL;
3472
3473 if (copy_from_user(&these, uthese, sizeof(these)))
3474 return -EFAULT;
3475
3476 if (uts) {
3477 if (get_old_timespec32(&ts, uts))
3478 return -EFAULT;
3479 }
3480
3481 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3482
3483 if (ret > 0 && uinfo) {
3484 if (copy_siginfo_to_user(uinfo, &info))
3485 ret = -EFAULT;
3486 }
3487
3488 return ret;
3489}
3490#endif
3491
3492#ifdef CONFIG_COMPAT
3493COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3494 struct compat_siginfo __user *, uinfo,
3495 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3496{
3497 sigset_t s;
3498 struct timespec64 t;
3499 kernel_siginfo_t info;
3500 long ret;
3501
3502 if (sigsetsize != sizeof(sigset_t))
3503 return -EINVAL;
3504
3505 if (get_compat_sigset(&s, uthese))
3506 return -EFAULT;
3507
3508 if (uts) {
3509 if (get_timespec64(&t, uts))
3510 return -EFAULT;
3511 }
3512
3513 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3514
3515 if (ret > 0 && uinfo) {
3516 if (copy_siginfo_to_user32(uinfo, &info))
3517 ret = -EFAULT;
3518 }
3519
3520 return ret;
3521}
3522
3523#ifdef CONFIG_COMPAT_32BIT_TIME
3524COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3525 struct compat_siginfo __user *, uinfo,
3526 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3527{
3528 sigset_t s;
3529 struct timespec64 t;
3530 kernel_siginfo_t info;
3531 long ret;
3532
3533 if (sigsetsize != sizeof(sigset_t))
3534 return -EINVAL;
3535
3536 if (get_compat_sigset(&s, uthese))
3537 return -EFAULT;
3538
3539 if (uts) {
3540 if (get_old_timespec32(&t, uts))
3541 return -EFAULT;
3542 }
3543
3544 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3545
3546 if (ret > 0 && uinfo) {
3547 if (copy_siginfo_to_user32(uinfo, &info))
3548 ret = -EFAULT;
3549 }
3550
3551 return ret;
3552}
3553#endif
3554#endif
3555
3556static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3557{
3558 clear_siginfo(info);
3559 info->si_signo = sig;
3560 info->si_errno = 0;
3561 info->si_code = SI_USER;
3562 info->si_pid = task_tgid_vnr(current);
3563 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3564}
3565
/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
3571SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3572{
3573 struct kernel_siginfo info;
3574
3575 prepare_kill_siginfo(sig, &info);
3576
3577 return kill_something_info(sig, &info, pid);
3578}
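/*
 * Example (userspace sketch): kill()'s pid argument selects the scope that
 * kill_something_info() implements: pid > 0 signals one process, pid == 0
 * the caller's process group, pid == -1 every process the caller has
 * permission to signal, and pid < -1 the process group -pid.
 *
 *	kill(1234, SIGTERM);	// one process (pid 1234 is hypothetical)
 *	kill(0, SIGTERM);	// the caller's own process group
 *	kill(-5678, SIGHUP);	// process group 5678 (hypothetical)
 */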
3579
/*
 * Verify that the signaler and signalee either are in the same pid namespace
 * or that the signaler's pid namespace is an ancestor of the signalee's pid
 * namespace.
 */
3585static bool access_pidfd_pidns(struct pid *pid)
3586{
3587 struct pid_namespace *active = task_active_pid_ns(current);
3588 struct pid_namespace *p = ns_of_pid(pid);
3589
3590 for (;;) {
3591 if (!p)
3592 return false;
3593 if (p == active)
3594 break;
3595 p = p->parent;
3596 }
3597
3598 return true;
3599}
3600
3601static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
3602{
3603#ifdef CONFIG_COMPAT
	/*
	 * Avoid hooking up compat syscalls and instead handle necessary
	 * conversions here. Note, this is a stop-gap measure and should not be
	 * considered a generic solution.
	 */
3609 if (in_compat_syscall())
3610 return copy_siginfo_from_user32(
3611 kinfo, (struct compat_siginfo __user *)info);
3612#endif
3613 return copy_siginfo_from_user(kinfo, info);
3614}
3615
3616static struct pid *pidfd_to_pid(const struct file *file)
3617{
3618 if (file->f_op == &pidfd_fops)
3619 return file->private_data;
3620
3621 return tgid_pidfd_to_pid(file);
3622}
3623
/**
 * sys_pidfd_send_signal - Signal a process through a pidfd
 * @pidfd:  file descriptor of the process
 * @sig:    signal to send
 * @info:   signal info
 * @flags:  future flags to allow
 *
 * The syscall currently only signals via PIDTYPE_PID which covers
 * kill(<positive-pid>, <signal>). It does not signal threads or process
 * groups.
 *
 * In order to extend the syscall to threads and process groups the @flags
 * argument should be used. In essence, the @flags argument will determine
 * what is signaled and not the file descriptor itself. Put in other words,
 * grouping is a property of the flags argument not a property of the file
 * descriptor.
 *
 * Return: 0 on success, negative errno on failure
 */
3642SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3643 siginfo_t __user *, info, unsigned int, flags)
3644{
3645 int ret;
3646 struct fd f;
3647 struct pid *pid;
3648 kernel_siginfo_t kinfo;
3649
	/* Enforce flags to be 0 until we add an extension. */
3651 if (flags)
3652 return -EINVAL;
3653
3654 f = fdget(pidfd);
3655 if (!f.file)
3656 return -EBADF;
3657
	/* Is this a pidfd? */
3659 pid = pidfd_to_pid(f.file);
3660 if (IS_ERR(pid)) {
3661 ret = PTR_ERR(pid);
3662 goto err;
3663 }
3664
3665 ret = -EINVAL;
3666 if (!access_pidfd_pidns(pid))
3667 goto err;
3668
3669 if (info) {
3670 ret = copy_siginfo_from_user_any(&kinfo, info);
3671 if (unlikely(ret))
3672 goto err;
3673
3674 ret = -EINVAL;
3675 if (unlikely(sig != kinfo.si_signo))
3676 goto err;
3677
		/* Only allow sending arbitrary signals to yourself. */
3679 ret = -EPERM;
3680 if ((task_pid(current) != pid) &&
3681 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3682 goto err;
3683 } else {
3684 prepare_kill_siginfo(sig, &kinfo);
3685 }
3686
3687 ret = kill_pid_info(sig, &kinfo, pid);
3688
3689err:
3690 fdput(f);
3691 return ret;
3692}
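/*
 * Example (userspace sketch, with a hypothetical pid 1234): at this kernel's
 * vintage a pidfd is either a CLONE_PIDFD file (pidfd_fops) or a /proc/<pid>
 * directory fd handled by tgid_pidfd_to_pid() above.  There is no libc
 * wrapper, so the raw syscall is used; __NR_pidfd_send_signal must be
 * provided by the installed kernel headers.
 *
 *	int pidfd = open("/proc/1234", O_DIRECTORY | O_RDONLY);
 *
 *	if (pidfd >= 0) {
 *		if (syscall(__NR_pidfd_send_signal, pidfd, SIGTERM,
 *			    NULL, 0) < 0)
 *			perror("pidfd_send_signal");
 *		close(pidfd);
 *	}
 */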
3693
3694static int
3695do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3696{
3697 struct task_struct *p;
3698 int error = -ESRCH;
3699
3700 rcu_read_lock();
3701 p = find_task_by_vpid(pid);
3702 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3703 error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal.  The window is
			 * tiny, and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
3717 }
3718 }
3719 rcu_read_unlock();
3720
3721 return error;
3722}
3723
3724static int do_tkill(pid_t tgid, pid_t pid, int sig)
3725{
3726 struct kernel_siginfo info;
3727
3728 clear_siginfo(&info);
3729 info.si_signo = sig;
3730 info.si_errno = 0;
3731 info.si_code = SI_TKILL;
3732 info.si_pid = task_tgid_vnr(current);
3733 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3734
3735 return do_send_specific(tgid, pid, sig, &info);
3736}
3737
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process. This method solves
 *  the problem of threads exiting and PIDs getting reused.
 */
3748SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3749{
	/* This is only valid for single tasks */
3751 if (pid <= 0 || tgid <= 0)
3752 return -EINVAL;
3753
3754 return do_tkill(tgid, pid, sig);
3755}
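/*
 * Example (userspace sketch): tgkill() has no libc wrapper here, so it is
 * reached via syscall(2).  Passing the thread group id alongside the thread
 * id is what prevents the signal from landing in an unrelated process that
 * recycled the tid.
 *
 *	pid_t tgid = getpid();
 *	pid_t tid = worker_tid;		// hypothetical: saved from gettid()
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */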
3756
/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
3764SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3765{
	/* This is only valid for single tasks */
3767 if (pid <= 0)
3768 return -EINVAL;
3769
3770 return do_tkill(0, pid, sig);
3771}
3772
3773static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3774{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
3778 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3779 (task_pid_vnr(current) != pid))
3780 return -EPERM;
3781
	/* POSIX.1b doesn't mention process groups.  */
3783 return kill_proc_info(sig, info, pid);
3784}
3785
/**
 *  sys_rt_sigqueueinfo - send signal information to a signal
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
3792SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3793 siginfo_t __user *, uinfo)
3794{
3795 kernel_siginfo_t info;
3796 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3797 if (unlikely(ret))
3798 return ret;
3799 return do_rt_sigqueueinfo(pid, sig, &info);
3800}
3801
3802#ifdef CONFIG_COMPAT
3803COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3804 compat_pid_t, pid,
3805 int, sig,
3806 struct compat_siginfo __user *, uinfo)
3807{
3808 kernel_siginfo_t info;
3809 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3810 if (unlikely(ret))
3811 return ret;
3812 return do_rt_sigqueueinfo(pid, sig, &info);
3813}
3814#endif
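/*
 * Example (userspace sketch): glibc's sigqueue() reaches rt_sigqueueinfo
 * with si_code = SI_QUEUE, which is negative and therefore passes the
 * "not pretending to be the kernel" check in do_rt_sigqueueinfo() above.
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	sigqueue(target_pid, SIGRTMIN, v);	// target_pid is hypothetical
 *	// the receiver's SA_SIGINFO handler sees si->si_value.sival_int == 42
 */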
3815
3816static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3817{
	/* This is only valid for single tasks */
3819 if (pid <= 0 || tgid <= 0)
3820 return -EINVAL;
3821
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
3825 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3826 (task_pid_vnr(current) != pid))
3827 return -EPERM;
3828
3829 return do_send_specific(tgid, pid, sig, info);
3830}
3831
3832SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3833 siginfo_t __user *, uinfo)
3834{
3835 kernel_siginfo_t info;
3836 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3837 if (unlikely(ret))
3838 return ret;
3839 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3840}
3841
3842#ifdef CONFIG_COMPAT
3843COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3844 compat_pid_t, tgid,
3845 compat_pid_t, pid,
3846 int, sig,
3847 struct compat_siginfo __user *, uinfo)
3848{
3849 kernel_siginfo_t info;
3850 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3851 if (unlikely(ret))
3852 return ret;
3853 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3854}
3855#endif
3856
/*
 * For kthreads only; must not be used if the task was cloned with
 * CLONE_SIGHAND.
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
3876EXPORT_SYMBOL(kernel_sigaction);
3877
3878void __weak sigaction_compat_abi(struct k_sigaction *act,
3879 struct k_sigaction *oact)
3880{
3881}
3882
3883int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3884{
3885 struct task_struct *p = current, *t;
3886 struct k_sigaction *k;
3887 sigset_t mask;
3888
3889 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3890 return -EINVAL;
3891
3892 k = &p->sighand->action[sig-1];
3893
3894 spin_lock_irq(&p->sighand->siglock);
3895 if (oact)
3896 *oact = *k;
3897
3898 sigaction_compat_abi(act, oact);
3899
3900 if (act) {
3901 sigdelsetmask(&act->sa.sa_mask,
3902 sigmask(SIGKILL) | sigmask(SIGSTOP));
3903 *k = *act;
3904
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
3915 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3916 sigemptyset(&mask);
3917 sigaddset(&mask, sig);
3918 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3919 for_each_thread(p, t)
3920 flush_sigqueue_mask(&mask, &t->pending);
3921 }
3922 }
3923
3924 spin_unlock_irq(&p->sighand->siglock);
3925 return 0;
3926}
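/*
 * Example (userspace sketch): installing a handler through the rt_sigaction
 * entry points below.  SA_SIGINFO selects the three-argument handler form,
 * so the siginfo_t assembled elsewhere in this file reaches the handler.
 *
 *	static void on_usr1(int sig, siginfo_t *si, void *uctx)
 *	{
 *		// async-signal-safe work only
 *	}
 *
 *	struct sigaction sa = { 0 };
 *
 *	sa.sa_sigaction = on_usr1;
 *	sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 */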
3927
3928static int
3929do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
3930 size_t min_ss_size)
3931{
3932 struct task_struct *t = current;
3933
3934 if (oss) {
3935 memset(oss, 0, sizeof(stack_t));
3936 oss->ss_sp = (void __user *) t->sas_ss_sp;
3937 oss->ss_size = t->sas_ss_size;
3938 oss->ss_flags = sas_ss_flags(sp) |
3939 (current->sas_ss_flags & SS_FLAG_BITS);
3940 }
3941
3942 if (ss) {
3943 void __user *ss_sp = ss->ss_sp;
3944 size_t ss_size = ss->ss_size;
3945 unsigned ss_flags = ss->ss_flags;
3946 int ss_mode;
3947
3948 if (unlikely(on_sig_stack(sp)))
3949 return -EPERM;
3950
3951 ss_mode = ss_flags & ~SS_FLAG_BITS;
3952 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3953 ss_mode != 0))
3954 return -EINVAL;
3955
3956 if (ss_mode == SS_DISABLE) {
3957 ss_size = 0;
3958 ss_sp = NULL;
3959 } else {
3960 if (unlikely(ss_size < min_ss_size))
3961 return -ENOMEM;
3962 }
3963
3964 t->sas_ss_sp = (unsigned long) ss_sp;
3965 t->sas_ss_size = ss_size;
3966 t->sas_ss_flags = ss_flags;
3967 }
3968 return 0;
3969}
3970
3971SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3972{
3973 stack_t new, old;
3974 int err;
3975 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3976 return -EFAULT;
3977 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3978 current_user_stack_pointer(),
3979 MINSIGSTKSZ);
3980 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
3981 err = -EFAULT;
3982 return err;
3983}
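/*
 * Example (userspace sketch): giving SIGSEGV its own stack so a handler can
 * still run after the normal stack overflows.  ss_size must be at least
 * MINSIGSTKSZ, the floor enforced by do_sigaltstack() above, or the call
 * fails with ENOMEM.
 *
 *	static char stack[SIGSTKSZ];
 *	stack_t ss = {
 *		.ss_sp    = stack,
 *		.ss_size  = sizeof(stack),
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = { .sa_flags = SA_SIGINFO | SA_ONSTACK };
 *
 *	sigaltstack(&ss, NULL);
 *	sa.sa_sigaction = segv_handler;		// hypothetical handler
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */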
3984
3985int restore_altstack(const stack_t __user *uss)
3986{
3987 stack_t new;
3988 if (copy_from_user(&new, uss, sizeof(stack_t)))
3989 return -EFAULT;
3990 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
3991 MINSIGSTKSZ);
3992
3993 return 0;
3994}
3995
3996int __save_altstack(stack_t __user *uss, unsigned long sp)
3997{
3998 struct task_struct *t = current;
3999 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4000 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4001 __put_user(t->sas_ss_size, &uss->ss_size);
4002 if (err)
4003 return err;
4004 if (t->sas_ss_flags & SS_AUTODISARM)
4005 sas_ss_reset(t);
4006 return 0;
4007}
4008
4009#ifdef CONFIG_COMPAT
4010static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4011 compat_stack_t __user *uoss_ptr)
4012{
4013 stack_t uss, uoss;
4014 int ret;
4015
4016 if (uss_ptr) {
4017 compat_stack_t uss32;
4018 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4019 return -EFAULT;
4020 uss.ss_sp = compat_ptr(uss32.ss_sp);
4021 uss.ss_flags = uss32.ss_flags;
4022 uss.ss_size = uss32.ss_size;
4023 }
4024 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4025 compat_user_stack_pointer(),
4026 COMPAT_MINSIGSTKSZ);
4027 if (ret >= 0 && uoss_ptr) {
4028 compat_stack_t old;
4029 memset(&old, 0, sizeof(old));
4030 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4031 old.ss_flags = uoss.ss_flags;
4032 old.ss_size = uoss.ss_size;
4033 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4034 ret = -EFAULT;
4035 }
4036 return ret;
4037}
4038
4039COMPAT_SYSCALL_DEFINE2(sigaltstack,
4040 const compat_stack_t __user *, uss_ptr,
4041 compat_stack_t __user *, uoss_ptr)
4042{
4043 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4044}
4045
4046int compat_restore_altstack(const compat_stack_t __user *uss)
4047{
4048 int err = do_compat_sigaltstack(uss, NULL);
4049
4050 return err == -EFAULT ? err : 0;
4051}
4052
4053int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4054{
4055 int err;
4056 struct task_struct *t = current;
4057 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4058 &uss->ss_sp) |
4059 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4060 __put_user(t->sas_ss_size, &uss->ss_size);
4061 if (err)
4062 return err;
4063 if (t->sas_ss_flags & SS_AUTODISARM)
4064 sas_ss_reset(t);
4065 return 0;
4066}
4067#endif
4068
4069#ifdef __ARCH_WANT_SYS_SIGPENDING
/**
 *  sys_sigpending - examine pending signals
 *  @uset: where the mask of pending signals is returned
 */
4075SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4076{
4077 sigset_t set;
4078
4079 if (sizeof(old_sigset_t) > sizeof(*uset))
4080 return -EINVAL;
4081
4082 do_sigpending(&set);
4083
4084 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4085 return -EFAULT;
4086
4087 return 0;
4088}
4089
4090#ifdef CONFIG_COMPAT
4091COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4092{
4093 sigset_t set;
4094
4095 do_sigpending(&set);
4096
4097 return put_user(set.sig[0], set32);
4098}
4099#endif
4100
4101#endif
4102
4103#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of the signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */
4114SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4115 old_sigset_t __user *, oset)
4116{
4117 old_sigset_t old_set, new_set;
4118 sigset_t new_blocked;
4119
4120 old_set = current->blocked.sig[0];
4121
4122 if (nset) {
4123 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4124 return -EFAULT;
4125
4126 new_blocked = current->blocked;
4127
4128 switch (how) {
4129 case SIG_BLOCK:
4130 sigaddsetmask(&new_blocked, new_set);
4131 break;
4132 case SIG_UNBLOCK:
4133 sigdelsetmask(&new_blocked, new_set);
4134 break;
4135 case SIG_SETMASK:
4136 new_blocked.sig[0] = new_set;
4137 break;
4138 default:
4139 return -EINVAL;
4140 }
4141
4142 set_current_blocked(&new_blocked);
4143 }
4144
4145 if (oset) {
4146 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4147 return -EFAULT;
4148 }
4149
4150 return 0;
4151}
4152#endif
4153
4154#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal to be sent
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
4162SYSCALL_DEFINE4(rt_sigaction, int, sig,
4163 const struct sigaction __user *, act,
4164 struct sigaction __user *, oact,
4165 size_t, sigsetsize)
4166{
4167 struct k_sigaction new_sa, old_sa;
4168 int ret;
4169
	/* XXX: Don't preclude handling different sized sigset_t's.  */
4171 if (sigsetsize != sizeof(sigset_t))
4172 return -EINVAL;
4173
4174 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4175 return -EFAULT;
4176
4177 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4178 if (ret)
4179 return ret;
4180
4181 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4182 return -EFAULT;
4183
4184 return 0;
4185}
4186#ifdef CONFIG_COMPAT
4187COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4188 const struct compat_sigaction __user *, act,
4189 struct compat_sigaction __user *, oact,
4190 compat_size_t, sigsetsize)
4191{
4192 struct k_sigaction new_ka, old_ka;
4193#ifdef __ARCH_HAS_SA_RESTORER
4194 compat_uptr_t restorer;
4195#endif
4196 int ret;
4197
4198
4199 if (sigsetsize != sizeof(compat_sigset_t))
4200 return -EINVAL;
4201
4202 if (act) {
4203 compat_uptr_t handler;
4204 ret = get_user(handler, &act->sa_handler);
4205 new_ka.sa.sa_handler = compat_ptr(handler);
4206#ifdef __ARCH_HAS_SA_RESTORER
4207 ret |= get_user(restorer, &act->sa_restorer);
4208 new_ka.sa.sa_restorer = compat_ptr(restorer);
4209#endif
4210 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4211 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4212 if (ret)
4213 return -EFAULT;
4214 }
4215
4216 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4217 if (!ret && oact) {
4218 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4219 &oact->sa_handler);
4220 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4221 sizeof(oact->sa_mask));
4222 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4223#ifdef __ARCH_HAS_SA_RESTORER
4224 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4225 &oact->sa_restorer);
4226#endif
4227 }
4228 return ret;
4229}
4230#endif
4231#endif
4232
4233#ifdef CONFIG_OLD_SIGACTION
4234SYSCALL_DEFINE3(sigaction, int, sig,
4235 const struct old_sigaction __user *, act,
4236 struct old_sigaction __user *, oact)
4237{
4238 struct k_sigaction new_ka, old_ka;
4239 int ret;
4240
4241 if (act) {
4242 old_sigset_t mask;
4243 if (!access_ok(act, sizeof(*act)) ||
4244 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4245 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4246 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4247 __get_user(mask, &act->sa_mask))
4248 return -EFAULT;
4249#ifdef __ARCH_HAS_KA_RESTORER
4250 new_ka.ka_restorer = NULL;
4251#endif
4252 siginitset(&new_ka.sa.sa_mask, mask);
4253 }
4254
4255 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4256
4257 if (!ret && oact) {
4258 if (!access_ok(oact, sizeof(*oact)) ||
4259 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4260 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4261 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4262 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4263 return -EFAULT;
4264 }
4265
4266 return ret;
4267}
4268#endif
4269#ifdef CONFIG_COMPAT_OLD_SIGACTION
4270COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4271 const struct compat_old_sigaction __user *, act,
4272 struct compat_old_sigaction __user *, oact)
4273{
4274 struct k_sigaction new_ka, old_ka;
4275 int ret;
4276 compat_old_sigset_t mask;
4277 compat_uptr_t handler, restorer;
4278
4279 if (act) {
4280 if (!access_ok(act, sizeof(*act)) ||
4281 __get_user(handler, &act->sa_handler) ||
4282 __get_user(restorer, &act->sa_restorer) ||
4283 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4284 __get_user(mask, &act->sa_mask))
4285 return -EFAULT;
4286
4287#ifdef __ARCH_HAS_KA_RESTORER
4288 new_ka.ka_restorer = NULL;
4289#endif
4290 new_ka.sa.sa_handler = compat_ptr(handler);
4291 new_ka.sa.sa_restorer = compat_ptr(restorer);
4292 siginitset(&new_ka.sa.sa_mask, mask);
4293 }
4294
4295 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4296
4297 if (!ret && oact) {
4298 if (!access_ok(oact, sizeof(*oact)) ||
4299 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4300 &oact->sa_handler) ||
4301 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4302 &oact->sa_restorer) ||
4303 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4304 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4305 return -EFAULT;
4306 }
4307 return ret;
4308}
4309#endif
4310
4311#ifdef CONFIG_SGETMASK_SYSCALL
/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
4316SYSCALL_DEFINE0(sgetmask)
4317{
	/* SMP safe */
4319 return current->blocked.sig[0];
4320}
4321
4322SYSCALL_DEFINE1(ssetmask, int, newmask)
4323{
4324 int old = current->blocked.sig[0];
4325 sigset_t newset;
4326
4327 siginitset(&newset, newmask);
4328 set_current_blocked(&newset);
4329
4330 return old;
4331}
4332#endif
4333
4334#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
4338SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4339{
4340 struct k_sigaction new_sa, old_sa;
4341 int ret;
4342
4343 new_sa.sa.sa_handler = handler;
4344 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4345 sigemptyset(&new_sa.sa.sa_mask);
4346
4347 ret = do_sigaction(sig, &new_sa, &old_sa);
4348
4349 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4350}
4351#endif
4352
4353#ifdef __ARCH_WANT_SYS_PAUSE
4354
4355SYSCALL_DEFINE0(pause)
4356{
4357 while (!signal_pending(current)) {
4358 __set_current_state(TASK_INTERRUPTIBLE);
4359 schedule();
4360 }
4361 return -ERESTARTNOHAND;
4362}
4363
4364#endif
4365
4366static int sigsuspend(sigset_t *set)
4367{
4368 current->saved_sigmask = current->blocked;
4369 set_current_blocked(set);
4370
4371 while (!signal_pending(current)) {
4372 __set_current_state(TASK_INTERRUPTIBLE);
4373 schedule();
4374 }
4375 set_restore_sigmask();
4376 return -ERESTARTNOHAND;
4377}
4378
/**
 *  sys_rt_sigsuspend - replace the signal mask with @unewset and suspend
 *			until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
4385SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4386{
4387 sigset_t newset;
4388
	/* XXX: Don't preclude handling different sized sigset_t's.  */
4390 if (sigsetsize != sizeof(sigset_t))
4391 return -EINVAL;
4392
4393 if (copy_from_user(&newset, unewset, sizeof(newset)))
4394 return -EFAULT;
4395 return sigsuspend(&newset);
4396}
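/*
 * Example (userspace sketch): the race-free wait that rt_sigsuspend exists
 * for.  The signal is blocked while the condition is tested, then the old
 * mask is restored and the task put to sleep in one atomic step, so a
 * wakeup signal cannot slip in between the test and the sleep.
 *
 *	volatile sig_atomic_t flag;	// hypothetical, set by the handler
 *	sigset_t block, waitmask;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &waitmask);
 *	sigdelset(&waitmask, SIGUSR1);
 *	while (!flag)
 *		sigsuspend(&waitmask);	// always returns -1 with EINTR
 */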
4397
4398#ifdef CONFIG_COMPAT
4399COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4400{
4401 sigset_t newset;
4402
4403
4404 if (sigsetsize != sizeof(sigset_t))
4405 return -EINVAL;
4406
4407 if (get_compat_sigset(&newset, unewset))
4408 return -EFAULT;
4409 return sigsuspend(&newset);
4410}
4411#endif
4412
4413#ifdef CONFIG_OLD_SIGSUSPEND
4414SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4415{
4416 sigset_t blocked;
4417 siginitset(&blocked, mask);
4418 return sigsuspend(&blocked);
4419}
4420#endif
4421#ifdef CONFIG_OLD_SIGSUSPEND3
4422SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4423{
4424 sigset_t blocked;
4425 siginitset(&blocked, mask);
4426 return sigsuspend(&blocked);
4427}
4428#endif
4429
4430__weak const char *arch_vma_name(struct vm_area_struct *vma)
4431{
4432 return NULL;
4433}
4434
4435static inline void siginfo_buildtime_checks(void)
4436{
4437 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
	/* Verify the offsets of the two-part union in struct siginfo */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET
4480}
4481
4482void __init signals_init(void)
4483{
4484 siginfo_buildtime_checks();
4485
4486 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4487}
4488
4489#ifdef CONFIG_KGDB_KDB
4490#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
4497void kdb_send_sig(struct task_struct *t, int sig)
4498{
4499 static struct task_struct *kdb_prev_t;
4500 int new_t, ret;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel; try again later.\n");
		return;
	}
4507 new_t = kdb_prev_t != t;
4508 kdb_prev_t = t;
4509 if (t->state != TASK_RUNNING && new_t) {
4510 spin_unlock(&t->sighand->siglock);
4511 kdb_printf("Process is not RUNNING, sending a signal from "
4512 "kdb risks deadlock\n"
4513 "on the run queue locks. "
4514 "The signal has _not_ been sent.\n"
4515 "Reissue the kill command if you want to risk "
4516 "the deadlock.\n");
4517 return;
4518 }
4519 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4520 spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
4526}
4527#endif
4528