// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
14#include <linux/slab.h>
15#include <linux/export.h>
16#include <linux/init.h>
17#include <linux/sched/mm.h>
18#include <linux/sched/user.h>
19#include <linux/sched/debug.h>
20#include <linux/sched/task.h>
21#include <linux/sched/task_stack.h>
22#include <linux/sched/cputime.h>
23#include <linux/file.h>
24#include <linux/fs.h>
25#include <linux/proc_fs.h>
26#include <linux/tty.h>
27#include <linux/binfmts.h>
28#include <linux/coredump.h>
29#include <linux/security.h>
30#include <linux/syscalls.h>
31#include <linux/ptrace.h>
32#include <linux/signal.h>
33#include <linux/signalfd.h>
34#include <linux/ratelimit.h>
35#include <linux/tracehook.h>
36#include <linux/capability.h>
37#include <linux/freezer.h>
38#include <linux/pid_namespace.h>
39#include <linux/nsproxy.h>
40#include <linux/user_namespace.h>
41#include <linux/uprobes.h>
42#include <linux/compat.h>
43#include <linux/cn_proc.h>
44#include <linux/compiler.h>
45#include <linux/posix-timers.h>
46#include <linux/cgroup.h>
47#include <linux/audit.h>
48
49#define CREATE_TRACE_POINTS
50#include <trace/events/signal.h>
51
52#include <asm/param.h>
53#include <linux/uaccess.h>
54#include <asm/unistd.h>
55#include <asm/siginfo.h>
56#include <asm/cacheflush.h>
57#include <asm/syscall.h>
58
/*
 * SLAB cache backing struct sigqueue allocations.
 */
63static struct kmem_cache *sigqueue_cachep;
64
65int print_fatal_signals __read_mostly;
66
67static void __user *sig_handler(struct task_struct *t, int sig)
68{
69 return t->sighand->action[sig - 1].sa.sa_handler;
70}
71
72static inline bool sig_handler_ignored(void __user *handler, int sig)
73{
74
75 return handler == SIG_IGN ||
76 (handler == SIG_DFL && sig_kernel_ignore(sig));
77}
78
79static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
80{
81 void __user *handler;
82
83 handler = sig_handler(t, sig);
84
85
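	/* The global init task never takes the kernel-only signals (SIGKILL, SIGSTOP). */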
86 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
87 return true;
88
89 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
90 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
91 return true;
92
93
94 if (unlikely((t->flags & PF_KTHREAD) &&
95 (handler == SIG_KTHREAD_KERNEL) && !force))
96 return true;
97
98 return sig_handler_ignored(handler, sig);
99}
100
101static bool sig_ignored(struct task_struct *t, int sig, bool force)
102{
	/*
	 * Blocked signals are never ignored: the handler may change before
	 * they are unblocked, so keep them pending.
	 */
108 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
109 return false;
110
111
112
113
114
115
116 if (t->ptrace && sig != SIGKILL)
117 return false;
118
119 return sig_task_ignored(t, sig, force);
120}
121
122
123
124
125
126static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
127{
128 unsigned long ready;
129 long i;
130
131 switch (_NSIG_WORDS) {
132 default:
133 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
134 ready |= signal->sig[i] &~ blocked->sig[i];
135 break;
136
137 case 4: ready = signal->sig[3] &~ blocked->sig[3];
138 ready |= signal->sig[2] &~ blocked->sig[2];
139 ready |= signal->sig[1] &~ blocked->sig[1];
140 ready |= signal->sig[0] &~ blocked->sig[0];
141 break;
142
143 case 2: ready = signal->sig[1] &~ blocked->sig[1];
144 ready |= signal->sig[0] &~ blocked->sig[0];
145 break;
146
147 case 1: ready = signal->sig[0] &~ blocked->sig[0];
148 }
149 return ready != 0;
150}
151
152#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
153
154static bool recalc_sigpending_tsk(struct task_struct *t)
155{
156 if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
157 PENDING(&t->pending, &t->blocked) ||
158 PENDING(&t->signal->shared_pending, &t->blocked) ||
159 cgroup_task_frozen(t)) {
160 set_tsk_thread_flag(t, TIF_SIGPENDING);
161 return true;
162 }
163
164
165
166
167
168
169 return false;
170}
171
172
173
174
175
176void recalc_sigpending_and_wake(struct task_struct *t)
177{
178 if (recalc_sigpending_tsk(t))
179 signal_wake_up(t, 0);
180}
181
182void recalc_sigpending(void)
183{
184 if (!recalc_sigpending_tsk(current) && !freezing(current))
185 clear_thread_flag(TIF_SIGPENDING);
186
187}
188EXPORT_SYMBOL(recalc_sigpending);
189
190void calculate_sigpending(void)
191{
192
193
194
195 spin_lock_irq(&current->sighand->siglock);
196 set_tsk_thread_flag(current, TIF_SIGPENDING);
197 recalc_sigpending();
198 spin_unlock_irq(&current->sighand->siglock);
199}
200
201
202
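/*
 * Signals generated synchronously by a trap in the current instruction
 * stream; next_signal() dequeues these ahead of any other pending signal
 * so the fault is handled in the context that raised it.
 */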
203#define SYNCHRONOUS_MASK \
204 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
205 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
206
207int next_signal(struct sigpending *pending, sigset_t *mask)
208{
209 unsigned long i, *s, *m, x;
210 int sig = 0;
211
212 s = pending->signal.sig;
213 m = mask->sig;
214
215
216
217
218
219 x = *s &~ *m;
220 if (x) {
221 if (x & SYNCHRONOUS_MASK)
222 x &= SYNCHRONOUS_MASK;
223 sig = ffz(~x) + 1;
224 return sig;
225 }
226
227 switch (_NSIG_WORDS) {
228 default:
229 for (i = 1; i < _NSIG_WORDS; ++i) {
230 x = *++s &~ *++m;
231 if (!x)
232 continue;
233 sig = ffz(~x) + i*_NSIG_BPW + 1;
234 break;
235 }
236 break;
237
238 case 2:
239 x = s[1] &~ m[1];
240 if (!x)
241 break;
242 sig = ffz(~x) + _NSIG_BPW + 1;
243 break;
244
245 case 1:
246
247 break;
248 }
249
250 return sig;
251}
252
253static inline void print_dropped_signal(int sig)
254{
255 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
256
257 if (!print_fatal_signals)
258 return;
259
260 if (!__ratelimit(&ratelimit_state))
261 return;
262
263 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
264 current->comm, current->pid, sig);
265}
266
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of the pending,
 * stop-consume, stop-sigmask and trapping bits; if a new stop signal is
 * being recorded, the previously recorded stop signal is cleared first.
 * The update is skipped if @task already has a fatal signal pending or
 * is exiting.
 *
 * Must be called with @task->sighand->siglock held.
 *
 * Returns %true if @mask was set, %false if it was skipped because @task
 * is dying.
 */
284bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
285{
286 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
287 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
288 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
289
290 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
291 return false;
292
293 if (mask & JOBCTL_STOP_SIGMASK)
294 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
295
296 task->jobctl |= mask;
297 return true;
298}
299
300
301
302
303
304
305
306
307
308
309
310
311
312void task_clear_jobctl_trapping(struct task_struct *task)
313{
314 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
315 task->jobctl &= ~JOBCTL_TRAPPING;
316 smp_mb();
317 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
318 }
319}
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
337{
338 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
339
340 if (mask & JOBCTL_STOP_PENDING)
341 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
342
343 task->jobctl &= ~mask;
344
345 if (!(task->jobctl & JOBCTL_PENDING_MASK))
346 task_clear_jobctl_trapping(task);
347}
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365static bool task_participate_group_stop(struct task_struct *task)
366{
367 struct signal_struct *sig = task->signal;
368 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
369
370 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
371
372 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
373
374 if (!consume)
375 return false;
376
377 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
378 sig->group_stop_count--;
379
380
381
382
383
384 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
385 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
386 return true;
387 }
388 return false;
389}
390
391void task_join_group_stop(struct task_struct *task)
392{
393 unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
394 struct signal_struct *sig = current->signal;
395
396 if (sig->group_stop_count) {
397 sig->group_stop_count++;
398 mask |= JOBCTL_STOP_CONSUME;
399 } else if (!(sig->flags & SIGNAL_STOP_STOPPED))
400 return;
401
402
403 task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
404}
405
/*
 * Allocate a new signal queue record.  The entry is charged against the
 * target's RLIMIT_SIGPENDING through its ucounts; NULL is returned when
 * the limit is exceeded (unless @override_rlimit) or the allocation fails.
 */
411static struct sigqueue *
412__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
413 int override_rlimit, const unsigned int sigqueue_flags)
414{
415 struct sigqueue *q = NULL;
416 struct ucounts *ucounts = NULL;
417 long sigpending;
418
419
420
421
422
423
424
425
426
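	/*
	 * Pin the target's ucounts under RCU and charge this queue entry to
	 * its RLIMIT_SIGPENDING count before attempting the allocation.
	 */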
427 rcu_read_lock();
428 ucounts = task_ucounts(t);
429 sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
430 rcu_read_unlock();
431 if (!sigpending)
432 return NULL;
433
434 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
435 q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
436 } else {
437 print_dropped_signal(sig);
438 }
439
440 if (unlikely(q == NULL)) {
441 dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
442 } else {
443 INIT_LIST_HEAD(&q->list);
444 q->flags = sigqueue_flags;
445 q->ucounts = ucounts;
446 }
447 return q;
448}
449
450static void __sigqueue_free(struct sigqueue *q)
451{
452 if (q->flags & SIGQUEUE_PREALLOC)
453 return;
454 if (q->ucounts) {
455 dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
456 q->ucounts = NULL;
457 }
458 kmem_cache_free(sigqueue_cachep, q);
459}
460
461void flush_sigqueue(struct sigpending *queue)
462{
463 struct sigqueue *q;
464
465 sigemptyset(&queue->signal);
466 while (!list_empty(&queue->list)) {
467 q = list_entry(queue->list.next, struct sigqueue , list);
468 list_del_init(&q->list);
469 __sigqueue_free(q);
470 }
471}
472
473
474
475
476void flush_signals(struct task_struct *t)
477{
478 unsigned long flags;
479
480 spin_lock_irqsave(&t->sighand->siglock, flags);
481 clear_tsk_thread_flag(t, TIF_SIGPENDING);
482 flush_sigqueue(&t->pending);
483 flush_sigqueue(&t->signal->shared_pending);
484 spin_unlock_irqrestore(&t->sighand->siglock, flags);
485}
486EXPORT_SYMBOL(flush_signals);
487
488#ifdef CONFIG_POSIX_TIMERS
489static void __flush_itimer_signals(struct sigpending *pending)
490{
491 sigset_t signal, retain;
492 struct sigqueue *q, *n;
493
494 signal = pending->signal;
495 sigemptyset(&retain);
496
497 list_for_each_entry_safe(q, n, &pending->list, list) {
498 int sig = q->info.si_signo;
499
500 if (likely(q->info.si_code != SI_TIMER)) {
501 sigaddset(&retain, sig);
502 } else {
503 sigdelset(&signal, sig);
504 list_del_init(&q->list);
505 __sigqueue_free(q);
506 }
507 }
508
509 sigorsets(&pending->signal, &signal, &retain);
510}
511
512void flush_itimer_signals(void)
513{
514 struct task_struct *tsk = current;
515 unsigned long flags;
516
517 spin_lock_irqsave(&tsk->sighand->siglock, flags);
518 __flush_itimer_signals(&tsk->pending);
519 __flush_itimer_signals(&tsk->signal->shared_pending);
520 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
521}
522#endif
523
524void ignore_signals(struct task_struct *t)
525{
526 int i;
527
528 for (i = 0; i < _NSIG; ++i)
529 t->sighand->action[i].sa.sa_handler = SIG_IGN;
530
531 flush_signals(t);
532}
533
534
535
536
537
538void
539flush_signal_handlers(struct task_struct *t, int force_default)
540{
541 int i;
542 struct k_sigaction *ka = &t->sighand->action[0];
543 for (i = _NSIG ; i != 0 ; i--) {
544 if (force_default || ka->sa.sa_handler != SIG_IGN)
545 ka->sa.sa_handler = SIG_DFL;
546 ka->sa.sa_flags = 0;
547#ifdef __ARCH_HAS_SA_RESTORER
548 ka->sa.sa_restorer = NULL;
549#endif
550 sigemptyset(&ka->sa.sa_mask);
551 ka++;
552 }
553}
554
555bool unhandled_signal(struct task_struct *tsk, int sig)
556{
557 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
558 if (is_global_init(tsk))
559 return true;
560
561 if (handler != SIG_IGN && handler != SIG_DFL)
562 return false;
563
564
565 return !tsk->ptrace;
566}
567
568static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
569 bool *resched_timer)
570{
571 struct sigqueue *q, *first = NULL;
572
573
574
575
576
577 list_for_each_entry(q, &list->list, list) {
578 if (q->info.si_signo == sig) {
579 if (first)
580 goto still_pending;
581 first = q;
582 }
583 }
584
585 sigdelset(&list->signal, sig);
586
587 if (first) {
588still_pending:
589 list_del_init(&first->list);
590 copy_siginfo(info, &first->info);
591
592 *resched_timer =
593 (first->flags & SIGQUEUE_PREALLOC) &&
594 (info->si_code == SI_TIMER) &&
595 (info->si_sys_private);
596
597 __sigqueue_free(first);
598 } else {
599
600
601
602
603
604 clear_siginfo(info);
605 info->si_signo = sig;
606 info->si_errno = 0;
607 info->si_code = SI_USER;
608 info->si_pid = 0;
609 info->si_uid = 0;
610 }
611}
612
613static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
614 kernel_siginfo_t *info, bool *resched_timer)
615{
616 int sig = next_signal(pending, mask);
617
618 if (sig)
619 collect_signal(sig, pending, info, resched_timer);
620 return sig;
621}
622
623
624
625
626
627
628
629int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
630{
631 bool resched_timer = false;
632 int signr;
633
634
635
636
637 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
638 if (!signr) {
639 signr = __dequeue_signal(&tsk->signal->shared_pending,
640 mask, info, &resched_timer);
641#ifdef CONFIG_POSIX_TIMERS
642
643
644
645
646
647
648
649
650
651
652
653
654
655 if (unlikely(signr == SIGALRM)) {
656 struct hrtimer *tmr = &tsk->signal->real_timer;
657
658 if (!hrtimer_is_queued(tmr) &&
659 tsk->signal->it_real_incr != 0) {
660 hrtimer_forward(tmr, tmr->base->get_time(),
661 tsk->signal->it_real_incr);
662 hrtimer_restart(tmr);
663 }
664 }
665#endif
666 }
667
668 recalc_sigpending();
669 if (!signr)
670 return 0;
671
672 if (unlikely(sig_kernel_stop(signr))) {
673
674
675
676
677
678
679
680
681
682
683
684
685 current->jobctl |= JOBCTL_STOP_DEQUEUED;
686 }
687#ifdef CONFIG_POSIX_TIMERS
688 if (resched_timer) {
689
690
691
692
693
694
695 spin_unlock(&tsk->sighand->siglock);
696 posixtimer_rearm(info);
697 spin_lock(&tsk->sighand->siglock);
698
699
700 info->si_sys_private = 0;
701 }
702#endif
703 return signr;
704}
705EXPORT_SYMBOL_GPL(dequeue_signal);
706
707static int dequeue_synchronous_signal(kernel_siginfo_t *info)
708{
709 struct task_struct *tsk = current;
710 struct sigpending *pending = &tsk->pending;
711 struct sigqueue *q, *sync = NULL;
712
713
714
715
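	/*
	 * Quick check: SYNCHRONOUS_MASK fits entirely in the first sigset
	 * word, so peek at it to see whether any unblocked synchronous
	 * signal is pending at all.
	 */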
716 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
717 return 0;
718
719
720
721
722 list_for_each_entry(q, &pending->list, list) {
723
724 if ((q->info.si_code > SI_USER) &&
725 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
726 sync = q;
727 goto next;
728 }
729 }
730 return 0;
731next:
732
733
734
735 list_for_each_entry_continue(q, &pending->list, list) {
736 if (q->info.si_signo == sync->info.si_signo)
737 goto still_pending;
738 }
739
740 sigdelset(&pending->signal, sync->info.si_signo);
741 recalc_sigpending();
742still_pending:
743 list_del_init(&sync->list);
744 copy_siginfo(info, &sync->info);
745 __sigqueue_free(sync);
746 return info->si_signo;
747}
748
/*
 * Tell a task that it has a new pending signal.
 *
 * Sets TIF_SIGPENDING and then either wakes the task from an interruptible
 * (or @state) sleep, or, if it was not sleeping in such a state, kicks the
 * CPU it is running on so the flag is noticed on the way back to user mode.
 * Called with the task's siglock held.
 */
760void signal_wake_up_state(struct task_struct *t, unsigned int state)
761{
762 set_tsk_thread_flag(t, TIF_SIGPENDING);
763
764
765
766
767
768
769
770 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
771 kick_process(t);
772}
773
/*
 * Remove all the signals in @mask from the pending set and the sigqueue
 * list @s.  The caller must hold the owning task's siglock.
 */
780static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
781{
782 struct sigqueue *q, *n;
783 sigset_t m;
784
785 sigandsets(&m, mask, &s->signal);
786 if (sigisemptyset(&m))
787 return;
788
789 sigandnsets(&s->signal, &s->signal, mask);
790 list_for_each_entry_safe(q, n, &s->list, list) {
791 if (sigismember(mask, q->info.si_signo)) {
792 list_del_init(&q->list);
793 __sigqueue_free(q);
794 }
795 }
796}
797
798static inline int is_si_special(const struct kernel_siginfo *info)
799{
800 return info <= SEND_SIG_PRIV;
801}
802
803static inline bool si_fromuser(const struct kernel_siginfo *info)
804{
805 return info == SEND_SIG_NOINFO ||
806 (!is_si_special(info) && SI_FROMUSER(info));
807}
808
809
810
811
812static bool kill_ok_by_cred(struct task_struct *t)
813{
814 const struct cred *cred = current_cred();
815 const struct cred *tcred = __task_cred(t);
816
817 return uid_eq(cred->euid, tcred->suid) ||
818 uid_eq(cred->euid, tcred->uid) ||
819 uid_eq(cred->uid, tcred->suid) ||
820 uid_eq(cred->uid, tcred->uid) ||
821 ns_capable(tcred->user_ns, CAP_KILL);
822}
823
824
825
826
827
828static int check_kill_permission(int sig, struct kernel_siginfo *info,
829 struct task_struct *t)
830{
831 struct pid *sid;
832 int error;
833
834 if (!valid_signal(sig))
835 return -EINVAL;
836
837 if (!si_fromuser(info))
838 return 0;
839
840 error = audit_signal_info(sig, t);
841 if (error)
842 return error;
843
844 if (!same_thread_group(current, t) &&
845 !kill_ok_by_cred(t)) {
846 switch (sig) {
847 case SIGCONT:
848 sid = task_session(t);
849
850
851
852
853 if (!sid || sid == task_session(current))
854 break;
855 fallthrough;
856 default:
857 return -EPERM;
858 }
859 }
860
861 return security_task_kill(t, info, sig, NULL);
862}
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881static void ptrace_trap_notify(struct task_struct *t)
882{
883 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
884 assert_spin_locked(&t->sighand->siglock);
885
886 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
887 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
888}
889
/*
 * Apply the implicit effects a signal has before it is queued:  a stop
 * signal flushes any pending SIGCONT, SIGCONT flushes pending stop signals
 * and wakes the stopped threads, and while the group is dumping core only
 * SIGKILL is let through.  Returns false if @sig should not be queued
 * because the target would ignore it anyway.
 */
900static bool prepare_signal(int sig, struct task_struct *p, bool force)
901{
902 struct signal_struct *signal = p->signal;
903 struct task_struct *t;
904 sigset_t flush;
905
906 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
907 if (!(signal->flags & SIGNAL_GROUP_EXIT))
908 return sig == SIGKILL;
909
910
911
912 } else if (sig_kernel_stop(sig)) {
913
914
915
916 siginitset(&flush, sigmask(SIGCONT));
917 flush_sigqueue_mask(&flush, &signal->shared_pending);
918 for_each_thread(p, t)
919 flush_sigqueue_mask(&flush, &t->pending);
920 } else if (sig == SIGCONT) {
921 unsigned int why;
922
923
924
925 siginitset(&flush, SIG_KERNEL_STOP_MASK);
926 flush_sigqueue_mask(&flush, &signal->shared_pending);
927 for_each_thread(p, t) {
928 flush_sigqueue_mask(&flush, &t->pending);
929 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
930 if (likely(!(t->ptrace & PT_SEIZED)))
931 wake_up_state(t, __TASK_STOPPED);
932 else
933 ptrace_trap_notify(t);
934 }
935
936
937
938
939
940
941
942
943
944 why = 0;
945 if (signal->flags & SIGNAL_STOP_STOPPED)
946 why |= SIGNAL_CLD_CONTINUED;
947 else if (signal->group_stop_count)
948 why |= SIGNAL_CLD_STOPPED;
949
950 if (why) {
951
952
953
954
955
956 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
957 signal->group_stop_count = 0;
958 signal->group_exit_code = 0;
959 }
960 }
961
962 return !sig_ignored(p, sig, force);
963}
964
/*
 * Decide whether @p is a good thread to deliver @sig to: the signal must
 * not be blocked and @p must not be exiting.  SIGKILL always qualifies;
 * otherwise stopped/traced threads are skipped, and a thread is preferred
 * if it is currently running or has no other signal already pending.
 */
973static inline bool wants_signal(int sig, struct task_struct *p)
974{
975 if (sigismember(&p->blocked, sig))
976 return false;
977
978 if (p->flags & PF_EXITING)
979 return false;
980
981 if (sig == SIGKILL)
982 return true;
983
984 if (task_is_stopped_or_traced(p))
985 return false;
986
987 return task_curr(p) || !task_sigpending(p);
988}
989
990static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
991{
992 struct signal_struct *signal = p->signal;
993 struct task_struct *t;
994
995
996
997
998
999
1000
1001 if (wants_signal(sig, p))
1002 t = p;
1003 else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1004
1005
1006
1007
1008 return;
1009 else {
1010
1011
1012
1013 t = signal->curr_target;
1014 while (!wants_signal(sig, t)) {
1015 t = next_thread(t);
1016 if (t == signal->curr_target)
1017
1018
1019
1020
1021
1022 return;
1023 }
1024 signal->curr_target = t;
1025 }
1026
1027
1028
1029
1030
1031 if (sig_fatal(p, sig) &&
1032 !(signal->flags & SIGNAL_GROUP_EXIT) &&
1033 !sigismember(&t->real_blocked, sig) &&
1034 (sig == SIGKILL || !p->ptrace)) {
1035
1036
1037
1038 if (!sig_kernel_coredump(sig)) {
1039
1040
1041
1042
1043
1044
1045 signal->flags = SIGNAL_GROUP_EXIT;
1046 signal->group_exit_code = sig;
1047 signal->group_stop_count = 0;
1048 t = p;
1049 do {
1050 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1051 sigaddset(&t->pending.signal, SIGKILL);
1052 signal_wake_up(t, 1);
1053 } while_each_thread(p, t);
1054 return;
1055 }
1056 }
1057
1058
1059
1060
1061
1062 signal_wake_up(t, sig == SIGKILL);
1063 return;
1064}
1065
1066static inline bool legacy_queue(struct sigpending *signals, int sig)
1067{
1068 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1069}
1070
1071static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1072 enum pid_type type, bool force)
1073{
1074 struct sigpending *pending;
1075 struct sigqueue *q;
1076 int override_rlimit;
1077 int ret = 0, result;
1078
1079 assert_spin_locked(&t->sighand->siglock);
1080
1081 result = TRACE_SIGNAL_IGNORED;
1082 if (!prepare_signal(sig, t, force))
1083 goto ret;
1084
1085 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1086
1087
1088
1089
1090
1091 result = TRACE_SIGNAL_ALREADY_PENDING;
1092 if (legacy_queue(pending, sig))
1093 goto ret;
1094
1095 result = TRACE_SIGNAL_DELIVERED;
1096
1097
1098
1099 if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1100 goto out_set;
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
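	/*
	 * Legacy (pre-RT) signals sent by the kernel or with plain SI_USER
	 * siginfo may exceed RLIMIT_SIGPENDING: if queueing fails they still
	 * set the pending bit below and only the extra siginfo is lost.
	 * Real-time signals queued with siginfo never override the limit and
	 * fail with -EAGAIN instead.
	 */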
1111 if (sig < SIGRTMIN)
1112 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1113 else
1114 override_rlimit = 0;
1115
1116 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1117
1118 if (q) {
1119 list_add_tail(&q->list, &pending->list);
1120 switch ((unsigned long) info) {
1121 case (unsigned long) SEND_SIG_NOINFO:
1122 clear_siginfo(&q->info);
1123 q->info.si_signo = sig;
1124 q->info.si_errno = 0;
1125 q->info.si_code = SI_USER;
1126 q->info.si_pid = task_tgid_nr_ns(current,
1127 task_active_pid_ns(t));
1128 rcu_read_lock();
1129 q->info.si_uid =
1130 from_kuid_munged(task_cred_xxx(t, user_ns),
1131 current_uid());
1132 rcu_read_unlock();
1133 break;
1134 case (unsigned long) SEND_SIG_PRIV:
1135 clear_siginfo(&q->info);
1136 q->info.si_signo = sig;
1137 q->info.si_errno = 0;
1138 q->info.si_code = SI_KERNEL;
1139 q->info.si_pid = 0;
1140 q->info.si_uid = 0;
1141 break;
1142 default:
1143 copy_siginfo(&q->info, info);
1144 break;
1145 }
1146 } else if (!is_si_special(info) &&
1147 sig >= SIGRTMIN && info->si_code != SI_USER) {
1148
1149
1150
1151
1152
1153 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1154 ret = -EAGAIN;
1155 goto ret;
1156 } else {
1157
1158
1159
1160
1161 result = TRACE_SIGNAL_LOSE_INFO;
1162 }
1163
1164out_set:
1165 signalfd_notify(t, sig);
1166 sigaddset(&pending->signal, sig);
1167
1168
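	/*
	 * Signals aimed at a whole process group or session (type above
	 * PIDTYPE_TGID) are also recorded in the delayed sets of any fork()
	 * currently in progress, so concurrently created children see them.
	 */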
1169 if (type > PIDTYPE_TGID) {
1170 struct multiprocess_signals *delayed;
1171 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1172 sigset_t *signal = &delayed->signal;
1173
1174 if (sig == SIGCONT)
1175 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1176 else if (sig_kernel_stop(sig))
1177 sigdelset(signal, SIGCONT);
1178 sigaddset(signal, sig);
1179 }
1180 }
1181
1182 complete_signal(sig, t, type);
1183ret:
1184 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1185 return ret;
1186}
1187
1188static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1189{
1190 bool ret = false;
1191 switch (siginfo_layout(info->si_signo, info->si_code)) {
1192 case SIL_KILL:
1193 case SIL_CHLD:
1194 case SIL_RT:
1195 ret = true;
1196 break;
1197 case SIL_TIMER:
1198 case SIL_POLL:
1199 case SIL_FAULT:
1200 case SIL_FAULT_TRAPNO:
1201 case SIL_FAULT_MCEERR:
1202 case SIL_FAULT_BNDERR:
1203 case SIL_FAULT_PKUERR:
1204 case SIL_FAULT_PERF_EVENT:
1205 case SIL_SYS:
1206 ret = false;
1207 break;
1208 }
1209 return ret;
1210}
1211
1212static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1213 enum pid_type type)
1214{
1215
1216 bool force = false;
1217
1218 if (info == SEND_SIG_NOINFO) {
1219
1220 force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1221 } else if (info == SEND_SIG_PRIV) {
1222
1223 force = true;
1224 } else if (has_si_pid_and_uid(info)) {
1225
1226 struct user_namespace *t_user_ns;
1227
1228 rcu_read_lock();
1229 t_user_ns = task_cred_xxx(t, user_ns);
1230 if (current_user_ns() != t_user_ns) {
1231 kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1232 info->si_uid = from_kuid_munged(t_user_ns, uid);
1233 }
1234 rcu_read_unlock();
1235
1236
1237 force = (info->si_code == SI_KERNEL);
1238
1239
1240 if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1241 info->si_pid = 0;
1242 force = true;
1243 }
1244 }
1245 return __send_signal(sig, info, t, type, force);
1246}
1247
1248static void print_fatal_signal(int signr)
1249{
1250 struct pt_regs *regs = signal_pt_regs();
1251 pr_info("potentially unexpected fatal signal %d.\n", signr);
1252
1253#if defined(__i386__) && !defined(__arch_um__)
1254 pr_info("code at %08lx: ", regs->ip);
1255 {
1256 int i;
1257 for (i = 0; i < 16; i++) {
1258 unsigned char insn;
1259
1260 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1261 break;
1262 pr_cont("%02x ", insn);
1263 }
1264 }
1265 pr_cont("\n");
1266#endif
1267 preempt_disable();
1268 show_regs(regs);
1269 preempt_enable();
1270}
1271
1272static int __init setup_print_fatal_signals(char *str)
1273{
1274 get_option (&str, &print_fatal_signals);
1275
1276 return 1;
1277}
1278
1279__setup("print-fatal-signals=", setup_print_fatal_signals);
1280
1281int
1282__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1283{
1284 return send_signal(sig, info, p, PIDTYPE_TGID);
1285}
1286
1287int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1288 enum pid_type type)
1289{
1290 unsigned long flags;
1291 int ret = -ESRCH;
1292
1293 if (lock_task_sighand(p, &flags)) {
1294 ret = send_signal(sig, info, p, type);
1295 unlock_task_sighand(p, &flags);
1296 }
1297
1298 return ret;
1299}
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312static int
1313force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool sigdfl)
1314{
1315 unsigned long int flags;
1316 int ret, blocked, ignored;
1317 struct k_sigaction *action;
1318 int sig = info->si_signo;
1319
1320 spin_lock_irqsave(&t->sighand->siglock, flags);
1321 action = &t->sighand->action[sig-1];
1322 ignored = action->sa.sa_handler == SIG_IGN;
1323 blocked = sigismember(&t->blocked, sig);
1324 if (blocked || ignored || sigdfl) {
1325 action->sa.sa_handler = SIG_DFL;
1326 if (blocked) {
1327 sigdelset(&t->blocked, sig);
1328 recalc_sigpending_and_wake(t);
1329 }
1330 }
1331
1332
1333
1334
1335 if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1336 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1337 ret = send_signal(sig, info, t, PIDTYPE_PID);
1338 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1339
1340 return ret;
1341}
1342
1343int force_sig_info(struct kernel_siginfo *info)
1344{
1345 return force_sig_info_to_task(info, current, false);
1346}
1347
1348
1349
1350
1351int zap_other_threads(struct task_struct *p)
1352{
1353 struct task_struct *t = p;
1354 int count = 0;
1355
1356 p->signal->group_stop_count = 0;
1357
1358 while_each_thread(p, t) {
1359 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1360 count++;
1361
1362
1363 if (t->exit_state)
1364 continue;
1365 sigaddset(&t->pending.signal, SIGKILL);
1366 signal_wake_up(t, 1);
1367 }
1368
1369 return count;
1370}
1371
1372struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1373 unsigned long *flags)
1374{
1375 struct sighand_struct *sighand;
1376
1377 rcu_read_lock();
1378 for (;;) {
1379 sighand = rcu_dereference(tsk->sighand);
1380 if (unlikely(sighand == NULL))
1381 break;
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394 spin_lock_irqsave(&sighand->siglock, *flags);
1395 if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1396 break;
1397 spin_unlock_irqrestore(&sighand->siglock, *flags);
1398 }
1399 rcu_read_unlock();
1400
1401 return sighand;
1402}
1403
1404#ifdef CONFIG_LOCKDEP
1405void lockdep_assert_task_sighand_held(struct task_struct *task)
1406{
1407 struct sighand_struct *sighand;
1408
1409 rcu_read_lock();
1410 sighand = rcu_dereference(task->sighand);
1411 if (sighand)
1412 lockdep_assert_held(&sighand->siglock);
1413 else
1414 WARN_ON_ONCE(1);
1415 rcu_read_unlock();
1416}
1417#endif
1418
1419
1420
1421
1422int group_send_sig_info(int sig, struct kernel_siginfo *info,
1423 struct task_struct *p, enum pid_type type)
1424{
1425 int ret;
1426
1427 rcu_read_lock();
1428 ret = check_kill_permission(sig, info, p);
1429 rcu_read_unlock();
1430
1431 if (!ret && sig)
1432 ret = do_send_sig_info(sig, info, p, type);
1433
1434 return ret;
1435}
1436
1437
1438
1439
1440
1441
1442int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1443{
1444 struct task_struct *p = NULL;
1445 int retval, success;
1446
1447 success = 0;
1448 retval = -ESRCH;
1449 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1450 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1451 success |= !err;
1452 retval = err;
1453 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1454 return success ? 0 : retval;
1455}
1456
1457int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1458{
1459 int error = -ESRCH;
1460 struct task_struct *p;
1461
1462 for (;;) {
1463 rcu_read_lock();
1464 p = pid_task(pid, PIDTYPE_PID);
1465 if (p)
1466 error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1467 rcu_read_unlock();
1468 if (likely(!p || error != -ESRCH))
1469 return error;
1470
1471
1472
1473
1474
1475
1476 }
1477}
1478
1479static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1480{
1481 int error;
1482 rcu_read_lock();
1483 error = kill_pid_info(sig, info, find_vpid(pid));
1484 rcu_read_unlock();
1485 return error;
1486}
1487
1488static inline bool kill_as_cred_perm(const struct cred *cred,
1489 struct task_struct *target)
1490{
1491 const struct cred *pcred = __task_cred(target);
1492
1493 return uid_eq(cred->euid, pcred->suid) ||
1494 uid_eq(cred->euid, pcred->uid) ||
1495 uid_eq(cred->uid, pcred->suid) ||
1496 uid_eq(cred->uid, pcred->uid);
1497}
1498
/*
 * Send a signal on USB async I/O completion.  Permission is checked
 * against the caller-supplied @cred rather than current's credentials,
 * and @addr is delivered as the SI_ASYNCIO sigval.
 */
1524int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1525 struct pid *pid, const struct cred *cred)
1526{
1527 struct kernel_siginfo info;
1528 struct task_struct *p;
1529 unsigned long flags;
1530 int ret = -EINVAL;
1531
1532 if (!valid_signal(sig))
1533 return ret;
1534
1535 clear_siginfo(&info);
1536 info.si_signo = sig;
1537 info.si_errno = errno;
1538 info.si_code = SI_ASYNCIO;
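	/* Overlay the caller-supplied sigval on the siginfo union, where the SI_ASYNCIO layout expects it. */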
1539 *((sigval_t *)&info.si_pid) = addr;
1540
1541 rcu_read_lock();
1542 p = pid_task(pid, PIDTYPE_PID);
1543 if (!p) {
1544 ret = -ESRCH;
1545 goto out_unlock;
1546 }
1547 if (!kill_as_cred_perm(cred, p)) {
1548 ret = -EPERM;
1549 goto out_unlock;
1550 }
1551 ret = security_task_kill(p, &info, sig, cred);
1552 if (ret)
1553 goto out_unlock;
1554
1555 if (sig) {
1556 if (lock_task_sighand(p, &flags)) {
1557 ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
1558 unlock_task_sighand(p, &flags);
1559 } else
1560 ret = -ESRCH;
1561 }
1562out_unlock:
1563 rcu_read_unlock();
1564 return ret;
1565}
1566EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1567
1568
1569
1570
1571
1572
1573
1574
1575static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1576{
1577 int ret;
1578
1579 if (pid > 0)
1580 return kill_proc_info(sig, info, pid);
1581
1582
1583 if (pid == INT_MIN)
1584 return -ESRCH;
1585
1586 read_lock(&tasklist_lock);
1587 if (pid != -1) {
1588 ret = __kill_pgrp_info(sig, info,
1589 pid ? find_vpid(-pid) : task_pgrp(current));
1590 } else {
1591 int retval = 0, count = 0;
1592 struct task_struct * p;
1593
1594 for_each_process(p) {
1595 if (task_pid_vnr(p) > 1 &&
1596 !same_thread_group(p, current)) {
1597 int err = group_send_sig_info(sig, info, p,
1598 PIDTYPE_MAX);
1599 ++count;
1600 if (err != -EPERM)
1601 retval = err;
1602 }
1603 }
1604 ret = count ? retval : -ESRCH;
1605 }
1606 read_unlock(&tasklist_lock);
1607
1608 return ret;
1609}
1610
1611
1612
1613
1614
1615int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1616{
1617
1618
1619
1620
1621 if (!valid_signal(sig))
1622 return -EINVAL;
1623
1624 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1625}
1626EXPORT_SYMBOL(send_sig_info);
1627
1628#define __si_special(priv) \
1629 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1630
1631int
1632send_sig(int sig, struct task_struct *p, int priv)
1633{
1634 return send_sig_info(sig, __si_special(priv), p);
1635}
1636EXPORT_SYMBOL(send_sig);
1637
1638void force_sig(int sig)
1639{
1640 struct kernel_siginfo info;
1641
1642 clear_siginfo(&info);
1643 info.si_signo = sig;
1644 info.si_errno = 0;
1645 info.si_code = SI_KERNEL;
1646 info.si_pid = 0;
1647 info.si_uid = 0;
1648 force_sig_info(&info);
1649}
1650EXPORT_SYMBOL(force_sig);
1651
1652
1653
1654
1655
1656
1657
1658void force_sigsegv(int sig)
1659{
1660 struct task_struct *p = current;
1661
1662 if (sig == SIGSEGV) {
1663 unsigned long flags;
1664 spin_lock_irqsave(&p->sighand->siglock, flags);
1665 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1666 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1667 }
1668 force_sig(SIGSEGV);
1669}
1670
1671int force_sig_fault_to_task(int sig, int code, void __user *addr
1672 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1673 , struct task_struct *t)
1674{
1675 struct kernel_siginfo info;
1676
1677 clear_siginfo(&info);
1678 info.si_signo = sig;
1679 info.si_errno = 0;
1680 info.si_code = code;
1681 info.si_addr = addr;
1682#ifdef __ia64__
1683 info.si_imm = imm;
1684 info.si_flags = flags;
1685 info.si_isr = isr;
1686#endif
1687 return force_sig_info_to_task(&info, t, false);
1688}
1689
1690int force_sig_fault(int sig, int code, void __user *addr
1691 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1692{
1693 return force_sig_fault_to_task(sig, code, addr
1694 ___ARCH_SI_IA64(imm, flags, isr), current);
1695}
1696
1697int send_sig_fault(int sig, int code, void __user *addr
1698 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1699 , struct task_struct *t)
1700{
1701 struct kernel_siginfo info;
1702
1703 clear_siginfo(&info);
1704 info.si_signo = sig;
1705 info.si_errno = 0;
1706 info.si_code = code;
1707 info.si_addr = addr;
1708#ifdef __ia64__
1709 info.si_imm = imm;
1710 info.si_flags = flags;
1711 info.si_isr = isr;
1712#endif
1713 return send_sig_info(info.si_signo, &info, t);
1714}
1715
1716int force_sig_mceerr(int code, void __user *addr, short lsb)
1717{
1718 struct kernel_siginfo info;
1719
1720 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1721 clear_siginfo(&info);
1722 info.si_signo = SIGBUS;
1723 info.si_errno = 0;
1724 info.si_code = code;
1725 info.si_addr = addr;
1726 info.si_addr_lsb = lsb;
1727 return force_sig_info(&info);
1728}
1729
1730int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1731{
1732 struct kernel_siginfo info;
1733
1734 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1735 clear_siginfo(&info);
1736 info.si_signo = SIGBUS;
1737 info.si_errno = 0;
1738 info.si_code = code;
1739 info.si_addr = addr;
1740 info.si_addr_lsb = lsb;
1741 return send_sig_info(info.si_signo, &info, t);
1742}
1743EXPORT_SYMBOL(send_sig_mceerr);
1744
1745int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1746{
1747 struct kernel_siginfo info;
1748
1749 clear_siginfo(&info);
1750 info.si_signo = SIGSEGV;
1751 info.si_errno = 0;
1752 info.si_code = SEGV_BNDERR;
1753 info.si_addr = addr;
1754 info.si_lower = lower;
1755 info.si_upper = upper;
1756 return force_sig_info(&info);
1757}
1758
1759#ifdef SEGV_PKUERR
1760int force_sig_pkuerr(void __user *addr, u32 pkey)
1761{
1762 struct kernel_siginfo info;
1763
1764 clear_siginfo(&info);
1765 info.si_signo = SIGSEGV;
1766 info.si_errno = 0;
1767 info.si_code = SEGV_PKUERR;
1768 info.si_addr = addr;
1769 info.si_pkey = pkey;
1770 return force_sig_info(&info);
1771}
1772#endif
1773
1774int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
1775{
1776 struct kernel_siginfo info;
1777
1778 clear_siginfo(&info);
1779 info.si_signo = SIGTRAP;
1780 info.si_errno = 0;
1781 info.si_code = TRAP_PERF;
1782 info.si_addr = addr;
1783 info.si_perf_data = sig_data;
1784 info.si_perf_type = type;
1785
1786 return force_sig_info(&info);
1787}
1788
1789
1790
1791
1792
1793
1794
1795
1796int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1797{
1798 struct kernel_siginfo info;
1799
1800 clear_siginfo(&info);
1801 info.si_signo = SIGSYS;
1802 info.si_code = SYS_SECCOMP;
1803 info.si_call_addr = (void __user *)KSTK_EIP(current);
1804 info.si_errno = reason;
1805 info.si_arch = syscall_get_arch(current);
1806 info.si_syscall = syscall;
1807 return force_sig_info_to_task(&info, current, force_coredump);
1808}
1809
1810
1811
1812
1813int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1814{
1815 struct kernel_siginfo info;
1816
1817 clear_siginfo(&info);
1818 info.si_signo = SIGTRAP;
1819 info.si_errno = errno;
1820 info.si_code = TRAP_HWBKPT;
1821 info.si_addr = addr;
1822 return force_sig_info(&info);
1823}
1824
1825
1826
1827
1828int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1829{
1830 struct kernel_siginfo info;
1831
1832 clear_siginfo(&info);
1833 info.si_signo = sig;
1834 info.si_errno = 0;
1835 info.si_code = code;
1836 info.si_addr = addr;
1837 info.si_trapno = trapno;
1838 return force_sig_info(&info);
1839}
1840
1841
1842
1843
1844int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1845 struct task_struct *t)
1846{
1847 struct kernel_siginfo info;
1848
1849 clear_siginfo(&info);
1850 info.si_signo = sig;
1851 info.si_errno = 0;
1852 info.si_code = code;
1853 info.si_addr = addr;
1854 info.si_trapno = trapno;
1855 return send_sig_info(info.si_signo, &info, t);
1856}
1857
1858int kill_pgrp(struct pid *pid, int sig, int priv)
1859{
1860 int ret;
1861
1862 read_lock(&tasklist_lock);
1863 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1864 read_unlock(&tasklist_lock);
1865
1866 return ret;
1867}
1868EXPORT_SYMBOL(kill_pgrp);
1869
1870int kill_pid(struct pid *pid, int sig, int priv)
1871{
1872 return kill_pid_info(sig, __si_special(priv), pid);
1873}
1874EXPORT_SYMBOL(kill_pid);
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885struct sigqueue *sigqueue_alloc(void)
1886{
1887 return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1888}
1889
1890void sigqueue_free(struct sigqueue *q)
1891{
1892 unsigned long flags;
1893 spinlock_t *lock = &current->sighand->siglock;
1894
1895 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1896
1897
1898
1899
1900
1901 spin_lock_irqsave(lock, flags);
1902 q->flags &= ~SIGQUEUE_PREALLOC;
1903
1904
1905
1906
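	/*
	 * If the entry is still queued, don't free it here: with
	 * SIGQUEUE_PREALLOC cleared above, __sigqueue_free() will release it
	 * when the signal is collected or the queue is flushed.
	 */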
1907 if (!list_empty(&q->list))
1908 q = NULL;
1909 spin_unlock_irqrestore(lock, flags);
1910
1911 if (q)
1912 __sigqueue_free(q);
1913}
1914
1915int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1916{
1917 int sig = q->info.si_signo;
1918 struct sigpending *pending;
1919 struct task_struct *t;
1920 unsigned long flags;
1921 int ret, result;
1922
1923 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1924
1925 ret = -1;
1926 rcu_read_lock();
1927 t = pid_task(pid, type);
1928 if (!t || !likely(lock_task_sighand(t, &flags)))
1929 goto ret;
1930
1931 ret = 1;
1932 result = TRACE_SIGNAL_IGNORED;
1933 if (!prepare_signal(sig, t, false))
1934 goto out;
1935
1936 ret = 0;
1937 if (unlikely(!list_empty(&q->list))) {
1938
1939
1940
1941
1942 BUG_ON(q->info.si_code != SI_TIMER);
1943 q->info.si_overrun++;
1944 result = TRACE_SIGNAL_ALREADY_PENDING;
1945 goto out;
1946 }
1947 q->info.si_overrun = 0;
1948
1949 signalfd_notify(t, sig);
1950 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1951 list_add_tail(&q->list, &pending->list);
1952 sigaddset(&pending->signal, sig);
1953 complete_signal(sig, t, type);
1954 result = TRACE_SIGNAL_DELIVERED;
1955out:
1956 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1957 unlock_task_sighand(t, &flags);
1958ret:
1959 rcu_read_unlock();
1960 return ret;
1961}
1962
1963static void do_notify_pidfd(struct task_struct *task)
1964{
1965 struct pid *pid;
1966
1967 WARN_ON(task->exit_state == 0);
1968 pid = task_pid(task);
1969 wake_up_all(&pid->wait_pidfd);
1970}
1971
1972
1973
1974
1975
1976
1977
1978
1979bool do_notify_parent(struct task_struct *tsk, int sig)
1980{
1981 struct kernel_siginfo info;
1982 unsigned long flags;
1983 struct sighand_struct *psig;
1984 bool autoreap = false;
1985 u64 utime, stime;
1986
1987 BUG_ON(sig == -1);
1988
1989
1990 BUG_ON(task_is_stopped_or_traced(tsk));
1991
1992 BUG_ON(!tsk->ptrace &&
1993 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1994
1995
1996 do_notify_pidfd(tsk);
1997
1998 if (sig != SIGCHLD) {
1999
2000
2001
2002
2003 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2004 sig = SIGCHLD;
2005 }
2006
2007 clear_siginfo(&info);
2008 info.si_signo = sig;
2009 info.si_errno = 0;
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021 rcu_read_lock();
2022 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2023 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2024 task_uid(tsk));
2025 rcu_read_unlock();
2026
2027 task_cputime(tsk, &utime, &stime);
2028 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2029 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2030
2031 info.si_status = tsk->exit_code & 0x7f;
2032 if (tsk->exit_code & 0x80)
2033 info.si_code = CLD_DUMPED;
2034 else if (tsk->exit_code & 0x7f)
2035 info.si_code = CLD_KILLED;
2036 else {
2037 info.si_code = CLD_EXITED;
2038 info.si_status = tsk->exit_code >> 8;
2039 }
2040
2041 psig = tsk->parent->sighand;
2042 spin_lock_irqsave(&psig->siglock, flags);
2043 if (!tsk->ptrace && sig == SIGCHLD &&
2044 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2045 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061 autoreap = true;
2062 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2063 sig = 0;
2064 }
2065
2066
2067
2068
2069 if (valid_signal(sig) && sig)
2070 __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2071 __wake_up_parent(tsk, tsk->parent);
2072 spin_unlock_irqrestore(&psig->siglock, flags);
2073
2074 return autoreap;
2075}
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090static void do_notify_parent_cldstop(struct task_struct *tsk,
2091 bool for_ptracer, int why)
2092{
2093 struct kernel_siginfo info;
2094 unsigned long flags;
2095 struct task_struct *parent;
2096 struct sighand_struct *sighand;
2097 u64 utime, stime;
2098
2099 if (for_ptracer) {
2100 parent = tsk->parent;
2101 } else {
2102 tsk = tsk->group_leader;
2103 parent = tsk->real_parent;
2104 }
2105
2106 clear_siginfo(&info);
2107 info.si_signo = SIGCHLD;
2108 info.si_errno = 0;
2109
2110
2111
2112 rcu_read_lock();
2113 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2114 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2115 rcu_read_unlock();
2116
2117 task_cputime(tsk, &utime, &stime);
2118 info.si_utime = nsec_to_clock_t(utime);
2119 info.si_stime = nsec_to_clock_t(stime);
2120
2121 info.si_code = why;
2122 switch (why) {
2123 case CLD_CONTINUED:
2124 info.si_status = SIGCONT;
2125 break;
2126 case CLD_STOPPED:
2127 info.si_status = tsk->signal->group_exit_code & 0x7f;
2128 break;
2129 case CLD_TRAPPED:
2130 info.si_status = tsk->exit_code & 0x7f;
2131 break;
2132 default:
2133 BUG();
2134 }
2135
2136 sighand = parent->sighand;
2137 spin_lock_irqsave(&sighand->siglock, flags);
2138 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2139 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2140 __group_send_sig_info(SIGCHLD, &info, parent);
2141
2142
2143
2144 __wake_up_parent(tsk, parent);
2145 spin_unlock_irqrestore(&sighand->siglock, flags);
2146}
2147
2148static inline bool may_ptrace_stop(void)
2149{
2150 if (!likely(current->ptrace))
2151 return false;
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165 if (unlikely(current->mm->core_state) &&
2166 unlikely(current->mm == current->parent->mm))
2167 return false;
2168
2169 return true;
2170}
2171
2172
2173
2174
2175
2176static bool sigkill_pending(struct task_struct *tsk)
2177{
2178 return sigismember(&tsk->pending.signal, SIGKILL) ||
2179 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2180}
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2194 __releases(&current->sighand->siglock)
2195 __acquires(&current->sighand->siglock)
2196{
2197 bool gstop_done = false;
2198
2199 if (arch_ptrace_stop_needed(exit_code, info)) {
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211 spin_unlock_irq(&current->sighand->siglock);
2212 arch_ptrace_stop(exit_code, info);
2213 spin_lock_irq(&current->sighand->siglock);
2214 if (sigkill_pending(current))
2215 return;
2216 }
2217
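	/*
	 * Commit to the trap: TASK_TRACED must be visible to the tracer
	 * before JOBCTL_TRAPPING is cleared further down, otherwise the
	 * tracer's do_wait() could miss this stop.
	 */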
2218 set_special_state(TASK_TRACED);
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238 smp_wmb();
2239
2240 current->last_siginfo = info;
2241 current->exit_code = exit_code;
2242
2243
2244
2245
2246
2247
2248
2249
2250 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2251 gstop_done = task_participate_group_stop(current);
2252
2253
2254 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2255 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2256 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2257
2258
2259 task_clear_jobctl_trapping(current);
2260
2261 spin_unlock_irq(&current->sighand->siglock);
2262 read_lock(&tasklist_lock);
2263 if (may_ptrace_stop()) {
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274 do_notify_parent_cldstop(current, true, why);
2275 if (gstop_done && ptrace_reparented(current))
2276 do_notify_parent_cldstop(current, false, why);
2277
2278
2279
2280
2281
2282
2283
2284 preempt_disable();
2285 read_unlock(&tasklist_lock);
2286 cgroup_enter_frozen();
2287 preempt_enable_no_resched();
2288 freezable_schedule();
2289 cgroup_leave_frozen(true);
2290 } else {
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301 if (gstop_done)
2302 do_notify_parent_cldstop(current, false, why);
2303
2304
2305 __set_current_state(TASK_RUNNING);
2306 if (clear_code)
2307 current->exit_code = 0;
2308 read_unlock(&tasklist_lock);
2309 }
2310
2311
2312
2313
2314
2315
2316 spin_lock_irq(&current->sighand->siglock);
2317 current->last_siginfo = NULL;
2318
2319
2320 current->jobctl &= ~JOBCTL_LISTENING;
2321
2322
2323
2324
2325
2326
2327 recalc_sigpending_tsk(current);
2328}
2329
2330static void ptrace_do_notify(int signr, int exit_code, int why)
2331{
2332 kernel_siginfo_t info;
2333
2334 clear_siginfo(&info);
2335 info.si_signo = signr;
2336 info.si_code = exit_code;
2337 info.si_pid = task_pid_vnr(current);
2338 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2339
2340
2341 ptrace_stop(exit_code, why, 1, &info);
2342}
2343
2344void ptrace_notify(int exit_code)
2345{
2346 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2347 if (unlikely(current->task_works))
2348 task_work_run();
2349
2350 spin_lock_irq(&current->sighand->siglock);
2351 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2352 spin_unlock_irq(&current->sighand->siglock);
2353}
2354
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: stop signal number, or 0 when only participating in an existing stop
 *
 * Initiate or participate in a group stop.  If no stop is in progress and
 * the dequeued stop signal is still valid, every other live thread in the
 * group is marked JOBCTL_STOP_PENDING and woken so that it stops too.  A
 * ptraced task does not stop here; it arms a JOBCTL_TRAP_STOP trap instead.
 *
 * Called with @current->sighand->siglock held; the lock is dropped on the
 * path that actually puts the task to sleep.
 *
 * Returns %true if the task stopped and has since been woken up, %false if
 * the stop was cancelled or deferred to a ptrace trap (siglock still held).
 */
2377static bool do_signal_stop(int signr)
2378 __releases(&current->sighand->siglock)
2379{
2380 struct signal_struct *sig = current->signal;
2381
2382 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2383 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2384 struct task_struct *t;
2385
2386
2387 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2388
2389 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2390 unlikely(signal_group_exit(sig)))
2391 return false;
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2412 sig->group_exit_code = signr;
2413
2414 sig->group_stop_count = 0;
2415
2416 if (task_set_jobctl_pending(current, signr | gstop))
2417 sig->group_stop_count++;
2418
2419 t = current;
2420 while_each_thread(current, t) {
2421
2422
2423
2424
2425
2426 if (!task_is_stopped(t) &&
2427 task_set_jobctl_pending(t, signr | gstop)) {
2428 sig->group_stop_count++;
2429 if (likely(!(t->ptrace & PT_SEIZED)))
2430 signal_wake_up(t, 0);
2431 else
2432 ptrace_trap_notify(t);
2433 }
2434 }
2435 }
2436
2437 if (likely(!current->ptrace)) {
2438 int notify = 0;
2439
2440
2441
2442
2443
2444
2445 if (task_participate_group_stop(current))
2446 notify = CLD_STOPPED;
2447
2448 set_special_state(TASK_STOPPED);
2449 spin_unlock_irq(&current->sighand->siglock);
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460 if (notify) {
2461 read_lock(&tasklist_lock);
2462 do_notify_parent_cldstop(current, false, notify);
2463 read_unlock(&tasklist_lock);
2464 }
2465
2466
2467 cgroup_enter_frozen();
2468 freezable_schedule();
2469 return true;
2470 } else {
2471
2472
2473
2474
2475 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2476 return false;
2477 }
2478}
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495static void do_jobctl_trap(void)
2496{
2497 struct signal_struct *signal = current->signal;
2498 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2499
2500 if (current->ptrace & PT_SEIZED) {
2501 if (!signal->group_stop_count &&
2502 !(signal->flags & SIGNAL_STOP_STOPPED))
2503 signr = SIGTRAP;
2504 WARN_ON_ONCE(!signr);
2505 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2506 CLD_STOPPED);
2507 } else {
2508 WARN_ON_ONCE(!signr);
2509 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2510 current->exit_code = 0;
2511 }
2512}
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524static void do_freezer_trap(void)
2525 __releases(&current->sighand->siglock)
2526{
2527
2528
2529
2530
2531
2532 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2533 JOBCTL_TRAP_FREEZE) {
2534 spin_unlock_irq(&current->sighand->siglock);
2535 return;
2536 }
2537
2538
2539
2540
2541
2542
2543
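	/*
	 * No fatal signal or job control trap is pending at this point;
	 * clear TIF_SIGPENDING so the frozen task does not immediately
	 * bounce out of schedule(), then sleep inside the frozen cgroup.
	 */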
2544 __set_current_state(TASK_INTERRUPTIBLE);
2545 clear_thread_flag(TIF_SIGPENDING);
2546 spin_unlock_irq(&current->sighand->siglock);
2547 cgroup_enter_frozen();
2548 freezable_schedule();
2549}
2550
2551static int ptrace_signal(int signr, kernel_siginfo_t *info)
2552{
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2563 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2564
2565
2566 signr = current->exit_code;
2567 if (signr == 0)
2568 return signr;
2569
2570 current->exit_code = 0;
2571
2572
2573
2574
2575
2576
2577
2578 if (signr != info->si_signo) {
2579 clear_siginfo(info);
2580 info->si_signo = signr;
2581 info->si_errno = 0;
2582 info->si_code = SI_USER;
2583 rcu_read_lock();
2584 info->si_pid = task_pid_vnr(current->parent);
2585 info->si_uid = from_kuid_munged(current_user_ns(),
2586 task_uid(current->parent));
2587 rcu_read_unlock();
2588 }
2589
2590
2591 if (sigismember(&current->blocked, signr)) {
2592 send_signal(signr, info, current, PIDTYPE_PID);
2593 signr = 0;
2594 }
2595
2596 return signr;
2597}
2598
2599static void hide_si_addr_tag_bits(struct ksignal *ksig)
2600{
2601 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2602 case SIL_FAULT:
2603 case SIL_FAULT_TRAPNO:
2604 case SIL_FAULT_MCEERR:
2605 case SIL_FAULT_BNDERR:
2606 case SIL_FAULT_PKUERR:
2607 case SIL_FAULT_PERF_EVENT:
2608 ksig->info.si_addr = arch_untagged_si_addr(
2609 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2610 break;
2611 case SIL_KILL:
2612 case SIL_TIMER:
2613 case SIL_POLL:
2614 case SIL_CHLD:
2615 case SIL_RT:
2616 case SIL_SYS:
2617 break;
2618 }
2619}
2620
2621bool get_signal(struct ksignal *ksig)
2622{
2623 struct sighand_struct *sighand = current->sighand;
2624 struct signal_struct *signal = current->signal;
2625 int signr;
2626
2627 if (unlikely(current->task_works))
2628 task_work_run();
2629
2630
2631
2632
2633
2634
2635 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2636 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2637 tracehook_notify_signal();
2638 if (!task_sigpending(current))
2639 return false;
2640 }
2641
2642 if (unlikely(uprobe_deny_signal()))
2643 return false;
2644
2645
2646
2647
2648
2649
2650 try_to_freeze();
2651
2652relock:
2653 spin_lock_irq(&sighand->siglock);
2654
2655
2656
2657
2658
2659
2660 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2661 int why;
2662
2663 if (signal->flags & SIGNAL_CLD_CONTINUED)
2664 why = CLD_CONTINUED;
2665 else
2666 why = CLD_STOPPED;
2667
2668 signal->flags &= ~SIGNAL_CLD_MASK;
2669
2670 spin_unlock_irq(&sighand->siglock);
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680 read_lock(&tasklist_lock);
2681 do_notify_parent_cldstop(current, false, why);
2682
2683 if (ptrace_reparented(current->group_leader))
2684 do_notify_parent_cldstop(current->group_leader,
2685 true, why);
2686 read_unlock(&tasklist_lock);
2687
2688 goto relock;
2689 }
2690
2691
2692 if (signal_group_exit(signal)) {
2693 ksig->info.si_signo = signr = SIGKILL;
2694 sigdelset(&current->pending.signal, SIGKILL);
2695 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2696 &sighand->action[SIGKILL - 1]);
2697 recalc_sigpending();
2698 goto fatal;
2699 }
2700
2701 for (;;) {
2702 struct k_sigaction *ka;
2703
2704 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2705 do_signal_stop(0))
2706 goto relock;
2707
2708 if (unlikely(current->jobctl &
2709 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2710 if (current->jobctl & JOBCTL_TRAP_MASK) {
2711 do_jobctl_trap();
2712 spin_unlock_irq(&sighand->siglock);
2713 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2714 do_freezer_trap();
2715
2716 goto relock;
2717 }
2718
2719
2720
2721
2722
2723 if (unlikely(cgroup_task_frozen(current))) {
2724 spin_unlock_irq(&sighand->siglock);
2725 cgroup_leave_frozen(false);
2726 goto relock;
2727 }
2728
2729
2730
2731
2732
2733
2734
2735 signr = dequeue_synchronous_signal(&ksig->info);
2736 if (!signr)
2737 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2738
2739 if (!signr)
2740 break;
2741
2742 if (unlikely(current->ptrace) && signr != SIGKILL) {
2743 signr = ptrace_signal(signr, &ksig->info);
2744 if (!signr)
2745 continue;
2746 }
2747
2748 ka = &sighand->action[signr-1];
2749
2750
2751 trace_signal_deliver(signr, &ksig->info, ka);
2752
2753 if (ka->sa.sa_handler == SIG_IGN)
2754 continue;
2755 if (ka->sa.sa_handler != SIG_DFL) {
2756
2757 ksig->ka = *ka;
2758
2759 if (ka->sa.sa_flags & SA_ONESHOT)
2760 ka->sa.sa_handler = SIG_DFL;
2761
2762 break;
2763 }
2764
2765
2766
2767
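		/* Default action for this signal is to ignore it (e.g. SIGCHLD, SIGWINCH, SIGURG): nothing to do. */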
2768 if (sig_kernel_ignore(signr))
2769 continue;
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2782 !sig_kernel_only(signr))
2783 continue;
2784
2785 if (sig_kernel_stop(signr)) {
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796 if (signr != SIGSTOP) {
2797 spin_unlock_irq(&sighand->siglock);
2798
2799
2800
2801 if (is_current_pgrp_orphaned())
2802 goto relock;
2803
2804 spin_lock_irq(&sighand->siglock);
2805 }
2806
2807 if (likely(do_signal_stop(ksig->info.si_signo))) {
2808
2809 goto relock;
2810 }
2811
2812
2813
2814
2815
2816 continue;
2817 }
2818
2819 fatal:
2820 spin_unlock_irq(&sighand->siglock);
2821 if (unlikely(cgroup_task_frozen(current)))
2822 cgroup_leave_frozen(true);
2823
2824
2825
2826
2827 current->flags |= PF_SIGNALED;
2828
2829 if (sig_kernel_coredump(signr)) {
2830 if (print_fatal_signals)
2831 print_fatal_signal(ksig->info.si_signo);
2832 proc_coredump_connector(current);
2833
2834
2835
2836
2837
2838
2839
2840
2841 do_coredump(&ksig->info);
2842 }
2843
2844
2845
2846
2847
2848
2849 if (current->flags & PF_IO_WORKER)
2850 goto out;
2851
2852
2853
2854
2855 do_group_exit(ksig->info.si_signo);
2856
2857 }
2858 spin_unlock_irq(&sighand->siglock);
2859out:
2860 ksig->sig = signr;
2861
2862 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2863 hide_si_addr_tag_bits(ksig);
2864
2865 return ksig->sig > 0;
2866}
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878static void signal_delivered(struct ksignal *ksig, int stepping)
2879{
2880 sigset_t blocked;
2881
2882
2883
2884
2885
2886 clear_restore_sigmask();
2887
2888 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2889 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2890 sigaddset(&blocked, ksig->sig);
2891 set_current_blocked(&blocked);
2892 if (current->sas_ss_flags & SS_AUTODISARM)
2893 sas_ss_reset(current);
2894 tracehook_signal_handler(stepping);
2895}
2896
2897void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2898{
2899 if (failed)
2900 force_sigsegv(ksig->sig);
2901 else
2902 signal_delivered(ksig, stepping);
2903}
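
/*
 * Usage sketch (illustrative, not part of this file): architecture code
 * typically drives get_signal()/signal_setup_done() from its
 * return-to-user path.  arch_do_signal(), setup_rt_frame() and the
 * TIF_SINGLESTEP test below stand in for the arch-specific pieces and are
 * assumptions for the example; only get_signal() and signal_setup_done()
 * come from this file.
 *
 *	void arch_do_signal(struct pt_regs *regs)
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig)) {
 *			// Build the user-mode signal frame, then report
 *			// success or failure so the blocked mask is updated
 *			// (or a SIGSEGV is forced on failure).
 *			int failed = setup_rt_frame(&ksig, regs);
 *
 *			signal_setup_done(failed, &ksig, test_thread_flag(TIF_SINGLESTEP));
 *		}
 *	}
 */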
2904
2905/*
2906 * It could be that complete_signal() picked us to notify about the
2907 * group-wide signal.  Other threads should be notified now to take
2908 * the shared signals in @which since we will not.
2909 */
2910static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2911{
2912 sigset_t retarget;
2913 struct task_struct *t;
2914
2915 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2916 if (sigisemptyset(&retarget))
2917 return;
2918
2919 t = tsk;
2920 while_each_thread(tsk, t) {
2921 if (t->flags & PF_EXITING)
2922 continue;
2923
2924 if (!has_pending_signals(&retarget, &t->blocked))
2925 continue;
2926
2927 sigandsets(&retarget, &retarget, &t->blocked);
2928
2929 if (!task_sigpending(t))
2930 signal_wake_up(t, 0);
2931
2932 if (sigisemptyset(&retarget))
2933 break;
2934 }
2935}
2936
2937void exit_signals(struct task_struct *tsk)
2938{
2939 int group_stop = 0;
2940 sigset_t unblocked;
2941
2942 /*
2943  * @tsk is about to have PF_EXITING set - lock out users which
2944  * expect a stable threadgroup.
2945  */
2946 cgroup_threadgroup_change_begin(tsk);
2947
2948 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2949 tsk->flags |= PF_EXITING;
2950 cgroup_threadgroup_change_end(tsk);
2951 return;
2952 }
2953
2954 spin_lock_irq(&tsk->sighand->siglock);
2955
2956 /*
2957  * From now on this task is not visible for group-wide signals.
2958  */
2959 tsk->flags |= PF_EXITING;
2960
2961 cgroup_threadgroup_change_end(tsk);
2962
2963 if (!task_sigpending(tsk))
2964 goto out;
2965
2966 unblocked = tsk->blocked;
2967 signotset(&unblocked);
2968 retarget_shared_pending(tsk, &unblocked);
2969
2970 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2971 task_participate_group_stop(tsk))
2972 group_stop = CLD_STOPPED;
2973out:
2974 spin_unlock_irq(&tsk->sighand->siglock);
2975
2976 /*
2977  * If group stop has completed, deliver the notification.  This
2978  * should always go to the real parent of the group leader.
2979  */
2980 if (unlikely(group_stop)) {
2981 read_lock(&tasklist_lock);
2982 do_notify_parent_cldstop(tsk, false, group_stop);
2983 read_unlock(&tasklist_lock);
2984 }
2985}
2986
2987/*
2988 * System call entry points.
2989 */
2990
2991/**
2992 *  sys_restart_syscall - restart a system call
2993 */
2994SYSCALL_DEFINE0(restart_syscall)
2995{
2996 struct restart_block *restart = &current->restart_block;
2997 return restart->fn(restart);
2998}
2999
3000long do_no_restart_syscall(struct restart_block *param)
3001{
3002 return -EINTR;
3003}
3004
3005static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3006{
3007 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3008 sigset_t newblocked;
3009
3010 sigandnsets(&newblocked, newset, &current->blocked);
3011 retarget_shared_pending(tsk, &newblocked);
3012 }
3013 tsk->blocked = *newset;
3014 recalc_sigpending();
3015}
3016
3017/**
3018 * set_current_blocked - change current->blocked mask
3019 * @newset: new mask
3020 *
3021 * It is wrong to change ->blocked directly, this helper should be used
3022 * to ensure the process can't miss a shared signal we are going to block.
3023 */
3024void set_current_blocked(sigset_t *newset)
3025{
3026 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3027 __set_current_blocked(newset);
3028}
3029
3030void __set_current_blocked(const sigset_t *newset)
3031{
3032 struct task_struct *tsk = current;
3033
3034
3035
3036
3037
3038 if (sigequalsets(&tsk->blocked, newset))
3039 return;
3040
3041 spin_lock_irq(&tsk->sighand->siglock);
3042 __set_task_blocked(tsk, newset);
3043 spin_unlock_irq(&tsk->sighand->siglock);
3044}
3045
3046/*
3047 * This is also useful for kernel threads that want to temporarily
3048 * (or permanently) block certain signals.
3049 *
3050 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3051 * interface happily blocks "unblockable" signals like SIGKILL
3052 * and friends.
3053 */
3054int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3055{
3056 struct task_struct *tsk = current;
3057 sigset_t newset;
3058
3059
3060 if (oldset)
3061 *oldset = tsk->blocked;
3062
3063 switch (how) {
3064 case SIG_BLOCK:
3065 sigorsets(&newset, &tsk->blocked, set);
3066 break;
3067 case SIG_UNBLOCK:
3068 sigandnsets(&newset, &tsk->blocked, set);
3069 break;
3070 case SIG_SETMASK:
3071 newset = *set;
3072 break;
3073 default:
3074 return -EINVAL;
3075 }
3076
3077 __set_current_blocked(&newset);
3078 return 0;
3079}
3080EXPORT_SYMBOL(sigprocmask);
3081
3082/*
3083 * The api helps set app-provided sigmasks.
3084 *
3085 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3086 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3087 *
3088 * Note that it does set_restore_sigmask() in advance, so it must be always
3089 * paired with restore_saved_sigmask_unless() before return from syscall.
3090 */
3091int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3092{
3093 sigset_t kmask;
3094
3095 if (!umask)
3096 return 0;
3097 if (sigsetsize != sizeof(sigset_t))
3098 return -EINVAL;
3099 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3100 return -EFAULT;
3101
3102 set_restore_sigmask();
3103 current->saved_sigmask = current->blocked;
3104 set_current_blocked(&kmask);
3105
3106 return 0;
3107}
3108
3109#ifdef CONFIG_COMPAT
3110int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3111 size_t sigsetsize)
3112{
3113 sigset_t kmask;
3114
3115 if (!umask)
3116 return 0;
3117 if (sigsetsize != sizeof(compat_sigset_t))
3118 return -EINVAL;
3119 if (get_compat_sigset(&kmask, umask))
3120 return -EFAULT;
3121
3122 set_restore_sigmask();
3123 current->saved_sigmask = current->blocked;
3124 set_current_blocked(&kmask);
3125
3126 return 0;
3127}
3128#endif
3129
3130/**
3131 *  sys_rt_sigprocmask - examine and change blocked signals
3132 *  @how: whether to add, remove, or set signals
3133 *  @nset: new set of signals to apply to the mask, or NULL
3134 *  @oset: where to store the previous signal mask, if non-null
3135 *  @sigsetsize: size of sigset_t type
3136 */
3137SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3138 sigset_t __user *, oset, size_t, sigsetsize)
3139{
3140 sigset_t old_set, new_set;
3141 int error;
3142
3143
3144 if (sigsetsize != sizeof(sigset_t))
3145 return -EINVAL;
3146
3147 old_set = current->blocked;
3148
3149 if (nset) {
3150 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3151 return -EFAULT;
3152 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3153
3154 error = sigprocmask(how, &new_set, NULL);
3155 if (error)
3156 return error;
3157 }
3158
3159 if (oset) {
3160 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3161 return -EFAULT;
3162 }
3163
3164 return 0;
3165}
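
/*
 * Userspace sketch (illustrative, not kernel code): rt_sigprocmask is what
 * glibc's sigprocmask(3) ends up calling.  Blocking a signal defers it;
 * restoring the old mask with SIG_SETMASK lets any instance that became
 * pending in the meantime be delivered.
 *
 *	#include <signal.h>
 *
 *	void with_sigint_blocked(void (*fn)(void))
 *	{
 *		sigset_t set, old;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		sigprocmask(SIG_BLOCK, &set, &old);	// defer SIGINT
 *		fn();					// critical section
 *		sigprocmask(SIG_SETMASK, &old, NULL);	// restore previous mask
 *	}
 */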
3166
3167#ifdef CONFIG_COMPAT
3168COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3169 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3170{
3171 sigset_t old_set = current->blocked;
3172
3173
3174 if (sigsetsize != sizeof(sigset_t))
3175 return -EINVAL;
3176
3177 if (nset) {
3178 sigset_t new_set;
3179 int error;
3180 if (get_compat_sigset(&new_set, nset))
3181 return -EFAULT;
3182 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3183
3184 error = sigprocmask(how, &new_set, NULL);
3185 if (error)
3186 return error;
3187 }
3188 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3189}
3190#endif
3191
3192static void do_sigpending(sigset_t *set)
3193{
3194 spin_lock_irq(&current->sighand->siglock);
3195 sigorsets(set, &current->pending.signal,
3196 &current->signal->shared_pending.signal);
3197 spin_unlock_irq(&current->sighand->siglock);
3198
3199 /* Outside the lock because only this thread touches it.  */
3200 sigandsets(set, &current->blocked, set);
3201}
3202
3203/**
3204 *  sys_rt_sigpending - examine pending signals that have been raised
3205 *			while blocked
3206 *  @uset: where the set of pending signals is stored
3207 *  @sigsetsize: size of sigset_t type or larger
3208 */
3209SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3210{
3211 sigset_t set;
3212
3213 if (sigsetsize > sizeof(*uset))
3214 return -EINVAL;
3215
3216 do_sigpending(&set);
3217
3218 if (copy_to_user(uset, &set, sigsetsize))
3219 return -EFAULT;
3220
3221 return 0;
3222}
3223
3224#ifdef CONFIG_COMPAT
3225COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3226 compat_size_t, sigsetsize)
3227{
3228 sigset_t set;
3229
3230 if (sigsetsize > sizeof(*uset))
3231 return -EINVAL;
3232
3233 do_sigpending(&set);
3234
3235 return put_compat_sigset(uset, &set, sigsetsize);
3236}
3237#endif
3238
3239static const struct {
3240 unsigned char limit, layout;
3241} sig_sicodes[] = {
3242 [SIGILL] = { NSIGILL, SIL_FAULT },
3243 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3244 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3245 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3246 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3247#if defined(SIGEMT)
3248 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3249#endif
3250 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3251 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3252 [SIGSYS] = { NSIGSYS, SIL_SYS },
3253};
3254
3255static bool known_siginfo_layout(unsigned sig, int si_code)
3256{
3257 if (si_code == SI_KERNEL)
3258 return true;
3259 else if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3260 if (sig_specific_sicodes(sig)) {
3261 if (si_code <= sig_sicodes[sig].limit)
3262 return true;
3263 }
3264 else if (si_code <= NSIGPOLL)
3265 return true;
3266 }
3267 else if (si_code >= SI_DETHREAD)
3268 return true;
3269 else if (si_code == SI_ASYNCNL)
3270 return true;
3271 return false;
3272}
3273
3274enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3275{
3276 enum siginfo_layout layout = SIL_KILL;
3277 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3278 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3279 (si_code <= sig_sicodes[sig].limit)) {
3280 layout = sig_sicodes[sig].layout;
3281
3282 if ((sig == SIGBUS) &&
3283 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3284 layout = SIL_FAULT_MCEERR;
3285 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3286 layout = SIL_FAULT_BNDERR;
3287#ifdef SEGV_PKUERR
3288 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3289 layout = SIL_FAULT_PKUERR;
3290#endif
3291 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3292 layout = SIL_FAULT_PERF_EVENT;
3293 else if (IS_ENABLED(CONFIG_SPARC) &&
3294 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3295 layout = SIL_FAULT_TRAPNO;
3296 else if (IS_ENABLED(CONFIG_ALPHA) &&
3297 ((sig == SIGFPE) ||
3298 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3299 layout = SIL_FAULT_TRAPNO;
3300 }
3301 else if (si_code <= NSIGPOLL)
3302 layout = SIL_POLL;
3303 } else {
3304 if (si_code == SI_TIMER)
3305 layout = SIL_TIMER;
3306 else if (si_code == SI_SIGIO)
3307 layout = SIL_POLL;
3308 else if (si_code < 0)
3309 layout = SIL_RT;
3310 }
3311 return layout;
3312}
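
/*
 * Usage note: callers switch on the returned layout to learn which siginfo
 * union members are valid, mirroring copy_siginfo_to_external32() below:
 *
 *	switch (siginfo_layout(info->si_signo, info->si_code)) {
 *	case SIL_CHLD:
 *		// si_pid, si_uid, si_status, si_utime, si_stime are valid
 *		break;
 *	case SIL_FAULT:
 *		// si_addr is valid
 *		break;
 *	default:
 *		break;
 *	}
 */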
3313
3314static inline char __user *si_expansion(const siginfo_t __user *info)
3315{
3316 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3317}
3318
3319int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3320{
3321 char __user *expansion = si_expansion(to);
3322 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3323 return -EFAULT;
3324 if (clear_user(expansion, SI_EXPANSION_SIZE))
3325 return -EFAULT;
3326 return 0;
3327}
3328
3329static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3330 const siginfo_t __user *from)
3331{
3332 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3333 char __user *expansion = si_expansion(from);
3334 char buf[SI_EXPANSION_SIZE];
3335 int i;
3336
3337 /*
3338  * An unknown si_code might need more than sizeof(struct
3339  * kernel_siginfo) bytes.  Verify that all of the extra bytes
3340  * are 0, so copy_siginfo_to_user() can return them exactly.
3341  */
3342 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3343 return -EFAULT;
3344 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3345 if (buf[i] != 0)
3346 return -E2BIG;
3347 }
3348 }
3349 return 0;
3350}
3351
3352static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3353 const siginfo_t __user *from)
3354{
3355 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3356 return -EFAULT;
3357 to->si_signo = signo;
3358 return post_copy_siginfo_from_user(to, from);
3359}
3360
3361int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3362{
3363 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3364 return -EFAULT;
3365 return post_copy_siginfo_from_user(to, from);
3366}
3367
3368#ifdef CONFIG_COMPAT
3369/**
3370 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3371 * @to: compat siginfo destination
3372 * @from: kernel siginfo source
3373 *
3374 * Note: This function does not work properly for the SIGCHLD on x32, but
3375 * fortunately it doesn't have to.  The only valid callers are
3376 * copy_siginfo_to_user32(), which is overridden for x32, and the coredump
3377 * code, which does not care because SIGCHLD never causes a coredump.
3378 */
3379void copy_siginfo_to_external32(struct compat_siginfo *to,
3380 const struct kernel_siginfo *from)
3381{
3382 memset(to, 0, sizeof(*to));
3383
3384 to->si_signo = from->si_signo;
3385 to->si_errno = from->si_errno;
3386 to->si_code = from->si_code;
3387 switch(siginfo_layout(from->si_signo, from->si_code)) {
3388 case SIL_KILL:
3389 to->si_pid = from->si_pid;
3390 to->si_uid = from->si_uid;
3391 break;
3392 case SIL_TIMER:
3393 to->si_tid = from->si_tid;
3394 to->si_overrun = from->si_overrun;
3395 to->si_int = from->si_int;
3396 break;
3397 case SIL_POLL:
3398 to->si_band = from->si_band;
3399 to->si_fd = from->si_fd;
3400 break;
3401 case SIL_FAULT:
3402 to->si_addr = ptr_to_compat(from->si_addr);
3403 break;
3404 case SIL_FAULT_TRAPNO:
3405 to->si_addr = ptr_to_compat(from->si_addr);
3406 to->si_trapno = from->si_trapno;
3407 break;
3408 case SIL_FAULT_MCEERR:
3409 to->si_addr = ptr_to_compat(from->si_addr);
3410 to->si_addr_lsb = from->si_addr_lsb;
3411 break;
3412 case SIL_FAULT_BNDERR:
3413 to->si_addr = ptr_to_compat(from->si_addr);
3414 to->si_lower = ptr_to_compat(from->si_lower);
3415 to->si_upper = ptr_to_compat(from->si_upper);
3416 break;
3417 case SIL_FAULT_PKUERR:
3418 to->si_addr = ptr_to_compat(from->si_addr);
3419 to->si_pkey = from->si_pkey;
3420 break;
3421 case SIL_FAULT_PERF_EVENT:
3422 to->si_addr = ptr_to_compat(from->si_addr);
3423 to->si_perf_data = from->si_perf_data;
3424 to->si_perf_type = from->si_perf_type;
3425 break;
3426 case SIL_CHLD:
3427 to->si_pid = from->si_pid;
3428 to->si_uid = from->si_uid;
3429 to->si_status = from->si_status;
3430 to->si_utime = from->si_utime;
3431 to->si_stime = from->si_stime;
3432 break;
3433 case SIL_RT:
3434 to->si_pid = from->si_pid;
3435 to->si_uid = from->si_uid;
3436 to->si_int = from->si_int;
3437 break;
3438 case SIL_SYS:
3439 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3440 to->si_syscall = from->si_syscall;
3441 to->si_arch = from->si_arch;
3442 break;
3443 }
3444}
3445
3446int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3447 const struct kernel_siginfo *from)
3448{
3449 struct compat_siginfo new;
3450
3451 copy_siginfo_to_external32(&new, from);
3452 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3453 return -EFAULT;
3454 return 0;
3455}
3456
3457static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3458 const struct compat_siginfo *from)
3459{
3460 clear_siginfo(to);
3461 to->si_signo = from->si_signo;
3462 to->si_errno = from->si_errno;
3463 to->si_code = from->si_code;
3464 switch(siginfo_layout(from->si_signo, from->si_code)) {
3465 case SIL_KILL:
3466 to->si_pid = from->si_pid;
3467 to->si_uid = from->si_uid;
3468 break;
3469 case SIL_TIMER:
3470 to->si_tid = from->si_tid;
3471 to->si_overrun = from->si_overrun;
3472 to->si_int = from->si_int;
3473 break;
3474 case SIL_POLL:
3475 to->si_band = from->si_band;
3476 to->si_fd = from->si_fd;
3477 break;
3478 case SIL_FAULT:
3479 to->si_addr = compat_ptr(from->si_addr);
3480 break;
3481 case SIL_FAULT_TRAPNO:
3482 to->si_addr = compat_ptr(from->si_addr);
3483 to->si_trapno = from->si_trapno;
3484 break;
3485 case SIL_FAULT_MCEERR:
3486 to->si_addr = compat_ptr(from->si_addr);
3487 to->si_addr_lsb = from->si_addr_lsb;
3488 break;
3489 case SIL_FAULT_BNDERR:
3490 to->si_addr = compat_ptr(from->si_addr);
3491 to->si_lower = compat_ptr(from->si_lower);
3492 to->si_upper = compat_ptr(from->si_upper);
3493 break;
3494 case SIL_FAULT_PKUERR:
3495 to->si_addr = compat_ptr(from->si_addr);
3496 to->si_pkey = from->si_pkey;
3497 break;
3498 case SIL_FAULT_PERF_EVENT:
3499 to->si_addr = compat_ptr(from->si_addr);
3500 to->si_perf_data = from->si_perf_data;
3501 to->si_perf_type = from->si_perf_type;
3502 break;
3503 case SIL_CHLD:
3504 to->si_pid = from->si_pid;
3505 to->si_uid = from->si_uid;
3506 to->si_status = from->si_status;
3507#ifdef CONFIG_X86_X32_ABI
3508 if (in_x32_syscall()) {
3509 to->si_utime = from->_sifields._sigchld_x32._utime;
3510 to->si_stime = from->_sifields._sigchld_x32._stime;
3511 } else
3512#endif
3513 {
3514 to->si_utime = from->si_utime;
3515 to->si_stime = from->si_stime;
3516 }
3517 break;
3518 case SIL_RT:
3519 to->si_pid = from->si_pid;
3520 to->si_uid = from->si_uid;
3521 to->si_int = from->si_int;
3522 break;
3523 case SIL_SYS:
3524 to->si_call_addr = compat_ptr(from->si_call_addr);
3525 to->si_syscall = from->si_syscall;
3526 to->si_arch = from->si_arch;
3527 break;
3528 }
3529 return 0;
3530}
3531
3532static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3533 const struct compat_siginfo __user *ufrom)
3534{
3535 struct compat_siginfo from;
3536
3537 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3538 return -EFAULT;
3539
3540 from.si_signo = signo;
3541 return post_copy_siginfo_from_user32(to, &from);
3542}
3543
3544int copy_siginfo_from_user32(struct kernel_siginfo *to,
3545 const struct compat_siginfo __user *ufrom)
3546{
3547 struct compat_siginfo from;
3548
3549 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3550 return -EFAULT;
3551
3552 return post_copy_siginfo_from_user32(to, &from);
3553}
3554#endif
3555
3556/**
3557 *  do_sigtimedwait - wait for queued signals specified in @which
3558 *  @which: queued signals to wait for
3559 *  @info: if non-null, the signal's siginfo is returned here
3560 *  @ts: upper bound on process time suspension
3561 */
3562static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3563 const struct timespec64 *ts)
3564{
3565 ktime_t *to = NULL, timeout = KTIME_MAX;
3566 struct task_struct *tsk = current;
3567 sigset_t mask = *which;
3568 int sig, ret = 0;
3569
3570 if (ts) {
3571 if (!timespec64_valid(ts))
3572 return -EINVAL;
3573 timeout = timespec64_to_ktime(*ts);
3574 to = &timeout;
3575 }
3576
3577
3578
3579
3580 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3581 signotset(&mask);
3582
3583 spin_lock_irq(&tsk->sighand->siglock);
3584 sig = dequeue_signal(tsk, &mask, info);
3585 if (!sig && timeout) {
3586 /*
3587  * None ready: temporarily unblock the signals we're interested
3588  * in while we sleep, so that we'll be awakened when they
3589  * arrive.  Unblocking is always fine here, so we can avoid
3590  * set_current_blocked().
3591  */
3592 tsk->real_blocked = tsk->blocked;
3593 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3594 recalc_sigpending();
3595 spin_unlock_irq(&tsk->sighand->siglock);
3596
3597 __set_current_state(TASK_INTERRUPTIBLE);
3598 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3599 HRTIMER_MODE_REL);
3600 spin_lock_irq(&tsk->sighand->siglock);
3601 __set_task_blocked(tsk, &tsk->real_blocked);
3602 sigemptyset(&tsk->real_blocked);
3603 sig = dequeue_signal(tsk, &mask, info);
3604 }
3605 spin_unlock_irq(&tsk->sighand->siglock);
3606
3607 if (sig)
3608 return sig;
3609 return ret ? -EINTR : -EAGAIN;
3610}
3611
3612/**
3613 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
3614 *			in @uthese
3615 *  @uthese: queued signals to wait for
3616 *  @uinfo: if non-null, the signal's siginfo is returned here
3617 *  @uts: upper bound on process time suspension
3618 *  @sigsetsize: size of sigset_t type
3619 */
3620SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3621 siginfo_t __user *, uinfo,
3622 const struct __kernel_timespec __user *, uts,
3623 size_t, sigsetsize)
3624{
3625 sigset_t these;
3626 struct timespec64 ts;
3627 kernel_siginfo_t info;
3628 int ret;
3629
3630
3631 if (sigsetsize != sizeof(sigset_t))
3632 return -EINVAL;
3633
3634 if (copy_from_user(&these, uthese, sizeof(these)))
3635 return -EFAULT;
3636
3637 if (uts) {
3638 if (get_timespec64(&ts, uts))
3639 return -EFAULT;
3640 }
3641
3642 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3643
3644 if (ret > 0 && uinfo) {
3645 if (copy_siginfo_to_user(uinfo, &info))
3646 ret = -EFAULT;
3647 }
3648
3649 return ret;
3650}
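
/*
 * Userspace sketch (illustrative): the usual pattern is to block the signals
 * of interest first and then consume them synchronously with sigtimedwait(2),
 * which glibc implements on top of rt_sigtimedwait.
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	int wait_for_sigusr1(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);	// avoid async delivery
 *		// Returns the signal number, or -1 with errno == EAGAIN on timeout.
 *		return sigtimedwait(&set, &info, &ts);
 *	}
 */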
3651
3652#ifdef CONFIG_COMPAT_32BIT_TIME
3653SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3654 siginfo_t __user *, uinfo,
3655 const struct old_timespec32 __user *, uts,
3656 size_t, sigsetsize)
3657{
3658 sigset_t these;
3659 struct timespec64 ts;
3660 kernel_siginfo_t info;
3661 int ret;
3662
3663 if (sigsetsize != sizeof(sigset_t))
3664 return -EINVAL;
3665
3666 if (copy_from_user(&these, uthese, sizeof(these)))
3667 return -EFAULT;
3668
3669 if (uts) {
3670 if (get_old_timespec32(&ts, uts))
3671 return -EFAULT;
3672 }
3673
3674 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3675
3676 if (ret > 0 && uinfo) {
3677 if (copy_siginfo_to_user(uinfo, &info))
3678 ret = -EFAULT;
3679 }
3680
3681 return ret;
3682}
3683#endif
3684
3685#ifdef CONFIG_COMPAT
3686COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3687 struct compat_siginfo __user *, uinfo,
3688 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3689{
3690 sigset_t s;
3691 struct timespec64 t;
3692 kernel_siginfo_t info;
3693 long ret;
3694
3695 if (sigsetsize != sizeof(sigset_t))
3696 return -EINVAL;
3697
3698 if (get_compat_sigset(&s, uthese))
3699 return -EFAULT;
3700
3701 if (uts) {
3702 if (get_timespec64(&t, uts))
3703 return -EFAULT;
3704 }
3705
3706 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3707
3708 if (ret > 0 && uinfo) {
3709 if (copy_siginfo_to_user32(uinfo, &info))
3710 ret = -EFAULT;
3711 }
3712
3713 return ret;
3714}
3715
3716#ifdef CONFIG_COMPAT_32BIT_TIME
3717COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3718 struct compat_siginfo __user *, uinfo,
3719 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3720{
3721 sigset_t s;
3722 struct timespec64 t;
3723 kernel_siginfo_t info;
3724 long ret;
3725
3726 if (sigsetsize != sizeof(sigset_t))
3727 return -EINVAL;
3728
3729 if (get_compat_sigset(&s, uthese))
3730 return -EFAULT;
3731
3732 if (uts) {
3733 if (get_old_timespec32(&t, uts))
3734 return -EFAULT;
3735 }
3736
3737 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3738
3739 if (ret > 0 && uinfo) {
3740 if (copy_siginfo_to_user32(uinfo, &info))
3741 ret = -EFAULT;
3742 }
3743
3744 return ret;
3745}
3746#endif
3747#endif
3748
3749static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3750{
3751 clear_siginfo(info);
3752 info->si_signo = sig;
3753 info->si_errno = 0;
3754 info->si_code = SI_USER;
3755 info->si_pid = task_tgid_vnr(current);
3756 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3757}
3758
3759
3760
3761
3762
3763
3764SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3765{
3766 struct kernel_siginfo info;
3767
3768 prepare_kill_siginfo(sig, &info);
3769
3770 return kill_something_info(sig, &info, pid);
3771}
3772
3773/*
3774 * Verify that the signaler and signalee either are in the same pid namespace
3775 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3776 * namespace.
3777 */
3778static bool access_pidfd_pidns(struct pid *pid)
3779{
3780 struct pid_namespace *active = task_active_pid_ns(current);
3781 struct pid_namespace *p = ns_of_pid(pid);
3782
3783 for (;;) {
3784 if (!p)
3785 return false;
3786 if (p == active)
3787 break;
3788 p = p->parent;
3789 }
3790
3791 return true;
3792}
3793
3794static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3795 siginfo_t __user *info)
3796{
3797#ifdef CONFIG_COMPAT
3798 /*
3799  * Avoid hooking up compat syscalls and instead handle the necessary
3800  * conversions here.  Note, this is a stop-gap measure and should not
3801  * be considered a generic solution.
3802  */
3803 if (in_compat_syscall())
3804 return copy_siginfo_from_user32(
3805 kinfo, (struct compat_siginfo __user *)info);
3806#endif
3807 return copy_siginfo_from_user(kinfo, info);
3808}
3809
3810static struct pid *pidfd_to_pid(const struct file *file)
3811{
3812 struct pid *pid;
3813
3814 pid = pidfd_pid(file);
3815 if (!IS_ERR(pid))
3816 return pid;
3817
3818 return tgid_pidfd_to_pid(file);
3819}
3820
3821/**
3822 * sys_pidfd_send_signal - Signal a process through a pidfd
3823 * @pidfd:  file descriptor of the process
3824 * @sig:    signal to send
3825 * @info:   signal info
3826 * @flags:  future flags
3827 *
3828 * The syscall currently only signals via PIDTYPE_PID, which covers
3829 * kill(<positive-pid>, <signal>).  It does not signal threads or process
3830 * groups.
3831 * In order to extend the syscall to threads and process groups the @flags
3832 * argument should be used.  In essence, the @flags argument will determine
3833 * what is signaled and not the file descriptor itself.  Put in other words,
3834 * grouping is a property of the flags argument, not a property of the file
3835 * descriptor.
3836 *
3837 * Return: 0 on success, negative errno on failure
3838 */
3839SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3840 siginfo_t __user *, info, unsigned int, flags)
3841{
3842 int ret;
3843 struct fd f;
3844 struct pid *pid;
3845 kernel_siginfo_t kinfo;
3846
3847
3848 if (flags)
3849 return -EINVAL;
3850
3851 f = fdget(pidfd);
3852 if (!f.file)
3853 return -EBADF;
3854
3855
3856 pid = pidfd_to_pid(f.file);
3857 if (IS_ERR(pid)) {
3858 ret = PTR_ERR(pid);
3859 goto err;
3860 }
3861
3862 ret = -EINVAL;
3863 if (!access_pidfd_pidns(pid))
3864 goto err;
3865
3866 if (info) {
3867 ret = copy_siginfo_from_user_any(&kinfo, info);
3868 if (unlikely(ret))
3869 goto err;
3870
3871 ret = -EINVAL;
3872 if (unlikely(sig != kinfo.si_signo))
3873 goto err;
3874
3875
3876 ret = -EPERM;
3877 if ((task_pid(current) != pid) &&
3878 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3879 goto err;
3880 } else {
3881 prepare_kill_siginfo(sig, &kinfo);
3882 }
3883
3884 ret = kill_pid_info(sig, &kinfo, pid);
3885
3886err:
3887 fdput(f);
3888 return ret;
3889}
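
/*
 * Userspace sketch (illustrative; assumes a libc that exposes the
 * SYS_pidfd_open and SYS_pidfd_send_signal syscall numbers):
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int kill_via_pidfd(pid_t pid)
 *	{
 *		int ret;
 *		int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *		if (pidfd < 0)
 *			return -1;
 *		// info == NULL: the kernel fills in SI_USER info, as with kill(2).
 *		ret = syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *		return ret;
 *	}
 *
 * Unlike kill(2), the pidfd pins the struct pid, so the signal cannot be
 * misdirected if the numeric PID is recycled after the target exits.
 */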
3890
3891static int
3892do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3893{
3894 struct task_struct *p;
3895 int error = -ESRCH;
3896
3897 rcu_read_lock();
3898 p = find_task_by_vpid(pid);
3899 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3900 error = check_kill_permission(sig, info, p);
3901
3902
3903
3904
3905 if (!error && sig) {
3906 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3907
3908
3909
3910
3911
3912 if (unlikely(error == -ESRCH))
3913 error = 0;
3914 }
3915 }
3916 rcu_read_unlock();
3917
3918 return error;
3919}
3920
3921static int do_tkill(pid_t tgid, pid_t pid, int sig)
3922{
3923 struct kernel_siginfo info;
3924
3925 clear_siginfo(&info);
3926 info.si_signo = sig;
3927 info.si_errno = 0;
3928 info.si_code = SI_TKILL;
3929 info.si_pid = task_tgid_vnr(current);
3930 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3931
3932 return do_send_specific(tgid, pid, sig, &info);
3933}
3934
3935/**
3936 *  sys_tgkill - send a signal to one specific thread
3937 *  @tgid: the thread group ID of the thread
3938 *  @pid: the PID of the thread
3939 *  @sig: signal to be sent
3940 *
3941 *  This syscall also checks @tgid and returns -ESRCH even if the PID
3942 *  exists but does not belong to the target group, so a signal is never
3943 *  delivered to a thread whose PID has been reused by another process.
3944 */
3945SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3946{
3947
3948 if (pid <= 0 || tgid <= 0)
3949 return -EINVAL;
3950
3951 return do_tkill(tgid, pid, sig);
3952}
3953
3954
3955
3956
3957
3958
3959
3960
3961SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3962{
3963
3964 if (pid <= 0)
3965 return -EINVAL;
3966
3967 return do_tkill(0, pid, sig);
3968}
3969
3970static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3971{
3972
3973
3974
3975 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3976 (task_pid_vnr(current) != pid))
3977 return -EPERM;
3978
3979
3980 return kill_proc_info(sig, info, pid);
3981}
3982
3983
3984
3985
3986
3987
3988
3989SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3990 siginfo_t __user *, uinfo)
3991{
3992 kernel_siginfo_t info;
3993 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3994 if (unlikely(ret))
3995 return ret;
3996 return do_rt_sigqueueinfo(pid, sig, &info);
3997}
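
/*
 * Userspace sketch (illustrative): glibc's sigqueue(3) is the usual front end
 * for rt_sigqueueinfo; it queues a SI_QUEUE siginfo carrying a value.
 *
 *	#include <signal.h>
 *
 *	int notify_worker(pid_t pid, int token)
 *	{
 *		union sigval value = { .sival_int = token };
 *
 *		// The receiver sees si_code == SI_QUEUE and si_value == value
 *		// in an SA_SIGINFO handler or via sigwaitinfo().
 *		return sigqueue(pid, SIGUSR1, value);
 *	}
 */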
3998
3999#ifdef CONFIG_COMPAT
4000COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4001 compat_pid_t, pid,
4002 int, sig,
4003 struct compat_siginfo __user *, uinfo)
4004{
4005 kernel_siginfo_t info;
4006 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4007 if (unlikely(ret))
4008 return ret;
4009 return do_rt_sigqueueinfo(pid, sig, &info);
4010}
4011#endif
4012
4013static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4014{
4015
4016 if (pid <= 0 || tgid <= 0)
4017 return -EINVAL;
4018
4019
4020
4021
4022 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4023 (task_pid_vnr(current) != pid))
4024 return -EPERM;
4025
4026 return do_send_specific(tgid, pid, sig, info);
4027}
4028
4029SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4030 siginfo_t __user *, uinfo)
4031{
4032 kernel_siginfo_t info;
4033 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4034 if (unlikely(ret))
4035 return ret;
4036 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4037}
4038
4039#ifdef CONFIG_COMPAT
4040COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4041 compat_pid_t, tgid,
4042 compat_pid_t, pid,
4043 int, sig,
4044 struct compat_siginfo __user *, uinfo)
4045{
4046 kernel_siginfo_t info;
4047 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4048 if (unlikely(ret))
4049 return ret;
4050 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4051}
4052#endif
4053
4054/*
4055 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4056 */
4057void kernel_sigaction(int sig, __sighandler_t action)
4058{
4059 spin_lock_irq(&current->sighand->siglock);
4060 current->sighand->action[sig - 1].sa.sa_handler = action;
4061 if (action == SIG_IGN) {
4062 sigset_t mask;
4063
4064 sigemptyset(&mask);
4065 sigaddset(&mask, sig);
4066
4067 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4068 flush_sigqueue_mask(&mask, &current->pending);
4069 recalc_sigpending();
4070 }
4071 spin_unlock_irq(&current->sighand->siglock);
4072}
4073EXPORT_SYMBOL(kernel_sigaction);
4074
4075void __weak sigaction_compat_abi(struct k_sigaction *act,
4076 struct k_sigaction *oact)
4077{
4078}
4079
4080int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4081{
4082 struct task_struct *p = current, *t;
4083 struct k_sigaction *k;
4084 sigset_t mask;
4085
4086 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4087 return -EINVAL;
4088
4089 k = &p->sighand->action[sig-1];
4090
4091 spin_lock_irq(&p->sighand->siglock);
4092 if (oact)
4093 *oact = *k;
4094
4095
4096
4097
4098
4099 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4100
4101 /*
4102  * Clear unknown flag bits in order to allow userspace to detect missing
4103  * support for flag bits and to allow the kernel to use non-uapi bits
4104  * internally.
4105  */
4106 if (act)
4107 act->sa.sa_flags &= UAPI_SA_FLAGS;
4108 if (oact)
4109 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4110
4111 sigaction_compat_abi(act, oact);
4112
4113 if (act) {
4114 sigdelsetmask(&act->sa.sa_mask,
4115 sigmask(SIGKILL) | sigmask(SIGSTOP));
4116 *k = *act;
4117 /*
4118  * POSIX 3.3.1.3:
4119  *  "Setting a signal action to SIG_IGN for a signal that is
4120  *   pending shall cause the pending signal to be discarded,
4121  *   whether or not it is blocked."
4122  *
4123  *  "Setting a signal action to SIG_DFL for a signal that is
4124  *   pending and whose default action is to ignore the signal
4125  *   (for example, SIGCHLD), shall cause the pending signal to
4126  *   be discarded, whether or not it is blocked."
4127  */
4128 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4129 sigemptyset(&mask);
4130 sigaddset(&mask, sig);
4131 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4132 for_each_thread(p, t)
4133 flush_sigqueue_mask(&mask, &t->pending);
4134 }
4135 }
4136
4137 spin_unlock_irq(&p->sighand->siglock);
4138 return 0;
4139}
4140
4141static int
4142do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4143 size_t min_ss_size)
4144{
4145 struct task_struct *t = current;
4146
4147 if (oss) {
4148 memset(oss, 0, sizeof(stack_t));
4149 oss->ss_sp = (void __user *) t->sas_ss_sp;
4150 oss->ss_size = t->sas_ss_size;
4151 oss->ss_flags = sas_ss_flags(sp) |
4152 (current->sas_ss_flags & SS_FLAG_BITS);
4153 }
4154
4155 if (ss) {
4156 void __user *ss_sp = ss->ss_sp;
4157 size_t ss_size = ss->ss_size;
4158 unsigned ss_flags = ss->ss_flags;
4159 int ss_mode;
4160
4161 if (unlikely(on_sig_stack(sp)))
4162 return -EPERM;
4163
4164 ss_mode = ss_flags & ~SS_FLAG_BITS;
4165 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4166 ss_mode != 0))
4167 return -EINVAL;
4168
4169 if (ss_mode == SS_DISABLE) {
4170 ss_size = 0;
4171 ss_sp = NULL;
4172 } else {
4173 if (unlikely(ss_size < min_ss_size))
4174 return -ENOMEM;
4175 }
4176
4177 t->sas_ss_sp = (unsigned long) ss_sp;
4178 t->sas_ss_size = ss_size;
4179 t->sas_ss_flags = ss_flags;
4180 }
4181 return 0;
4182}
4183
4184SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4185{
4186 stack_t new, old;
4187 int err;
4188 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4189 return -EFAULT;
4190 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4191 current_user_stack_pointer(),
4192 MINSIGSTKSZ);
4193 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4194 err = -EFAULT;
4195 return err;
4196}
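
/*
 * Userspace sketch (illustrative): an alternate stack is mostly useful for
 * handling SIGSEGV caused by stack overflow, where the normal stack cannot
 * host the handler frame.  The 64 KiB size below is an arbitrary choice,
 * comfortably above MINSIGSTKSZ.
 *
 *	#include <signal.h>
 *
 *	static char altstack_mem[64 * 1024];
 *
 *	void install_segv_stack(void (*handler)(int, siginfo_t *, void *))
 *	{
 *		stack_t ss = {
 *			.ss_sp = altstack_mem,
 *			.ss_size = sizeof(altstack_mem),
 *			.ss_flags = 0,
 *		};
 *		struct sigaction sa = {
 *			.sa_sigaction = handler,
 *			.sa_flags = SA_SIGINFO | SA_ONSTACK,
 *		};
 *
 *		sigaltstack(&ss, NULL);
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *	}
 */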
4197
4198int restore_altstack(const stack_t __user *uss)
4199{
4200 stack_t new;
4201 if (copy_from_user(&new, uss, sizeof(stack_t)))
4202 return -EFAULT;
4203 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4204 MINSIGSTKSZ);
4205
4206 return 0;
4207}
4208
4209int __save_altstack(stack_t __user *uss, unsigned long sp)
4210{
4211 struct task_struct *t = current;
4212 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4213 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4214 __put_user(t->sas_ss_size, &uss->ss_size);
4215 return err;
4216}
4217
4218#ifdef CONFIG_COMPAT
4219static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4220 compat_stack_t __user *uoss_ptr)
4221{
4222 stack_t uss, uoss;
4223 int ret;
4224
4225 if (uss_ptr) {
4226 compat_stack_t uss32;
4227 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4228 return -EFAULT;
4229 uss.ss_sp = compat_ptr(uss32.ss_sp);
4230 uss.ss_flags = uss32.ss_flags;
4231 uss.ss_size = uss32.ss_size;
4232 }
4233 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4234 compat_user_stack_pointer(),
4235 COMPAT_MINSIGSTKSZ);
4236 if (ret >= 0 && uoss_ptr) {
4237 compat_stack_t old;
4238 memset(&old, 0, sizeof(old));
4239 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4240 old.ss_flags = uoss.ss_flags;
4241 old.ss_size = uoss.ss_size;
4242 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4243 ret = -EFAULT;
4244 }
4245 return ret;
4246}
4247
4248COMPAT_SYSCALL_DEFINE2(sigaltstack,
4249 const compat_stack_t __user *, uss_ptr,
4250 compat_stack_t __user *, uoss_ptr)
4251{
4252 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4253}
4254
4255int compat_restore_altstack(const compat_stack_t __user *uss)
4256{
4257 int err = do_compat_sigaltstack(uss, NULL);
4258
4259 return err == -EFAULT ? err : 0;
4260}
4261
4262int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4263{
4264 int err;
4265 struct task_struct *t = current;
4266 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4267 &uss->ss_sp) |
4268 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4269 __put_user(t->sas_ss_size, &uss->ss_size);
4270 return err;
4271}
4272#endif
4273
4274#ifdef __ARCH_WANT_SYS_SIGPENDING
4275
4276
4277
4278
4279
4280SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4281{
4282 sigset_t set;
4283
4284 if (sizeof(old_sigset_t) > sizeof(*uset))
4285 return -EINVAL;
4286
4287 do_sigpending(&set);
4288
4289 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4290 return -EFAULT;
4291
4292 return 0;
4293}
4294
4295#ifdef CONFIG_COMPAT
4296COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4297{
4298 sigset_t set;
4299
4300 do_sigpending(&set);
4301
4302 return put_user(set.sig[0], set32);
4303}
4304#endif
4305
4306#endif
4307
4308#ifdef __ARCH_WANT_SYS_SIGPROCMASK
4309
4310/**
4311 *  sys_sigprocmask - examine and change blocked signals
4312 *  @how: whether to add, remove, or set signals
4313 *  @nset: signals to add or remove (if non-null)
4314 *  @oset: previous value of signal mask if non-null
4315 *
4316 * Some platforms have their own version with special arguments;
4317 * others support only sys_rt_sigprocmask.
4318 */
4319SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4320 old_sigset_t __user *, oset)
4321{
4322 old_sigset_t old_set, new_set;
4323 sigset_t new_blocked;
4324
4325 old_set = current->blocked.sig[0];
4326
4327 if (nset) {
4328 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4329 return -EFAULT;
4330
4331 new_blocked = current->blocked;
4332
4333 switch (how) {
4334 case SIG_BLOCK:
4335 sigaddsetmask(&new_blocked, new_set);
4336 break;
4337 case SIG_UNBLOCK:
4338 sigdelsetmask(&new_blocked, new_set);
4339 break;
4340 case SIG_SETMASK:
4341 new_blocked.sig[0] = new_set;
4342 break;
4343 default:
4344 return -EINVAL;
4345 }
4346
4347 set_current_blocked(&new_blocked);
4348 }
4349
4350 if (oset) {
4351 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4352 return -EFAULT;
4353 }
4354
4355 return 0;
4356}
4357#endif
4358
4359#ifndef CONFIG_ODD_RT_SIGACTION
4360/**
4361 *  sys_rt_sigaction - alter an action taken by a process
4362 *  @sig: signal to be sent
4363 *  @act: new sigaction
4364 *  @oact: used to save the previous sigaction
4365 *  @sigsetsize: size of sigset_t type
4366 */
4367SYSCALL_DEFINE4(rt_sigaction, int, sig,
4368 const struct sigaction __user *, act,
4369 struct sigaction __user *, oact,
4370 size_t, sigsetsize)
4371{
4372 struct k_sigaction new_sa, old_sa;
4373 int ret;
4374
4375
4376 if (sigsetsize != sizeof(sigset_t))
4377 return -EINVAL;
4378
4379 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4380 return -EFAULT;
4381
4382 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4383 if (ret)
4384 return ret;
4385
4386 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4387 return -EFAULT;
4388
4389 return 0;
4390}
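
/*
 * Userspace sketch (illustrative): sigaction(2) is the two-way interface,
 * letting a caller install a handler and capture the previous disposition
 * (the @oact argument above) so it can be restored later.
 *
 *	#include <signal.h>
 *
 *	static struct sigaction saved_int;
 *
 *	void install_sigint(void (*handler)(int))
 *	{
 *		struct sigaction sa = { .sa_handler = handler };
 *
 *		sigemptyset(&sa.sa_mask);
 *		sa.sa_flags = SA_RESTART;		// restart interrupted syscalls
 *		sigaction(SIGINT, &sa, &saved_int);	// remember old disposition
 *	}
 *
 *	void restore_sigint(void)
 *	{
 *		sigaction(SIGINT, &saved_int, NULL);
 *	}
 */
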
4391#ifdef CONFIG_COMPAT
4392COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4393 const struct compat_sigaction __user *, act,
4394 struct compat_sigaction __user *, oact,
4395 compat_size_t, sigsetsize)
4396{
4397 struct k_sigaction new_ka, old_ka;
4398#ifdef __ARCH_HAS_SA_RESTORER
4399 compat_uptr_t restorer;
4400#endif
4401 int ret;
4402
4403
4404 if (sigsetsize != sizeof(compat_sigset_t))
4405 return -EINVAL;
4406
4407 if (act) {
4408 compat_uptr_t handler;
4409 ret = get_user(handler, &act->sa_handler);
4410 new_ka.sa.sa_handler = compat_ptr(handler);
4411#ifdef __ARCH_HAS_SA_RESTORER
4412 ret |= get_user(restorer, &act->sa_restorer);
4413 new_ka.sa.sa_restorer = compat_ptr(restorer);
4414#endif
4415 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4416 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4417 if (ret)
4418 return -EFAULT;
4419 }
4420
4421 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4422 if (!ret && oact) {
4423 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4424 &oact->sa_handler);
4425 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4426 sizeof(oact->sa_mask));
4427 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4428#ifdef __ARCH_HAS_SA_RESTORER
4429 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4430 &oact->sa_restorer);
4431#endif
4432 }
4433 return ret;
4434}
4435#endif
4436#endif
4437
4438#ifdef CONFIG_OLD_SIGACTION
4439SYSCALL_DEFINE3(sigaction, int, sig,
4440 const struct old_sigaction __user *, act,
4441 struct old_sigaction __user *, oact)
4442{
4443 struct k_sigaction new_ka, old_ka;
4444 int ret;
4445
4446 if (act) {
4447 old_sigset_t mask;
4448 if (!access_ok(act, sizeof(*act)) ||
4449 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4450 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4451 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4452 __get_user(mask, &act->sa_mask))
4453 return -EFAULT;
4454#ifdef __ARCH_HAS_KA_RESTORER
4455 new_ka.ka_restorer = NULL;
4456#endif
4457 siginitset(&new_ka.sa.sa_mask, mask);
4458 }
4459
4460 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4461
4462 if (!ret && oact) {
4463 if (!access_ok(oact, sizeof(*oact)) ||
4464 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4465 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4466 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4467 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4468 return -EFAULT;
4469 }
4470
4471 return ret;
4472}
4473#endif
4474#ifdef CONFIG_COMPAT_OLD_SIGACTION
4475COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4476 const struct compat_old_sigaction __user *, act,
4477 struct compat_old_sigaction __user *, oact)
4478{
4479 struct k_sigaction new_ka, old_ka;
4480 int ret;
4481 compat_old_sigset_t mask;
4482 compat_uptr_t handler, restorer;
4483
4484 if (act) {
4485 if (!access_ok(act, sizeof(*act)) ||
4486 __get_user(handler, &act->sa_handler) ||
4487 __get_user(restorer, &act->sa_restorer) ||
4488 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4489 __get_user(mask, &act->sa_mask))
4490 return -EFAULT;
4491
4492#ifdef __ARCH_HAS_KA_RESTORER
4493 new_ka.ka_restorer = NULL;
4494#endif
4495 new_ka.sa.sa_handler = compat_ptr(handler);
4496 new_ka.sa.sa_restorer = compat_ptr(restorer);
4497 siginitset(&new_ka.sa.sa_mask, mask);
4498 }
4499
4500 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4501
4502 if (!ret && oact) {
4503 if (!access_ok(oact, sizeof(*oact)) ||
4504 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4505 &oact->sa_handler) ||
4506 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4507 &oact->sa_restorer) ||
4508 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4509 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4510 return -EFAULT;
4511 }
4512 return ret;
4513}
4514#endif
4515
4516#ifdef CONFIG_SGETMASK_SYSCALL
4517
4518
4519
4520
4521SYSCALL_DEFINE0(sgetmask)
4522{
4523
4524 return current->blocked.sig[0];
4525}
4526
4527SYSCALL_DEFINE1(ssetmask, int, newmask)
4528{
4529 int old = current->blocked.sig[0];
4530 sigset_t newset;
4531
4532 siginitset(&newset, newmask);
4533 set_current_blocked(&newset);
4534
4535 return old;
4536}
4537#endif
4538
4539#ifdef __ARCH_WANT_SYS_SIGNAL
4540
4541
4542
4543SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4544{
4545 struct k_sigaction new_sa, old_sa;
4546 int ret;
4547
4548 new_sa.sa.sa_handler = handler;
4549 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4550 sigemptyset(&new_sa.sa.sa_mask);
4551
4552 ret = do_sigaction(sig, &new_sa, &old_sa);
4553
4554 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4555}
4556#endif
4557
4558#ifdef __ARCH_WANT_SYS_PAUSE
4559
4560SYSCALL_DEFINE0(pause)
4561{
4562 while (!signal_pending(current)) {
4563 __set_current_state(TASK_INTERRUPTIBLE);
4564 schedule();
4565 }
4566 return -ERESTARTNOHAND;
4567}
4568
4569#endif
4570
4571static int sigsuspend(sigset_t *set)
4572{
4573 current->saved_sigmask = current->blocked;
4574 set_current_blocked(set);
4575
4576 while (!signal_pending(current)) {
4577 __set_current_state(TASK_INTERRUPTIBLE);
4578 schedule();
4579 }
4580 set_restore_sigmask();
4581 return -ERESTARTNOHAND;
4582}
4583
4584/**
4585 *  sys_rt_sigsuspend - replace the signal mask with @unewset until a
4586 *			signal is received
4587 *  @unewset: new signal mask value
4588 *  @sigsetsize: size of sigset_t type
4589 */
4590SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4591{
4592 sigset_t newset;
4593
4594
4595 if (sigsetsize != sizeof(sigset_t))
4596 return -EINVAL;
4597
4598 if (copy_from_user(&newset, unewset, sizeof(newset)))
4599 return -EFAULT;
4600 return sigsuspend(&newset);
4601}
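
/*
 * Userspace sketch (illustrative): sigsuspend(2) exists to close the race
 * between "check a flag" and "go to sleep".  The signal stays blocked except
 * for the atomic window inside sigsuspend itself.  got_sigchld is assumed to
 * be set by a SIGCHLD handler installed elsewhere.
 *
 *	#include <signal.h>
 *
 *	volatile sig_atomic_t got_sigchld;
 *
 *	void wait_for_sigchld(void)
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGCHLD);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		while (!got_sigchld)
 *			sigsuspend(&old);	// atomically unblock and sleep
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */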
4602
4603#ifdef CONFIG_COMPAT
4604COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4605{
4606 sigset_t newset;
4607
4608
4609 if (sigsetsize != sizeof(sigset_t))
4610 return -EINVAL;
4611
4612 if (get_compat_sigset(&newset, unewset))
4613 return -EFAULT;
4614 return sigsuspend(&newset);
4615}
4616#endif
4617
4618#ifdef CONFIG_OLD_SIGSUSPEND
4619SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4620{
4621 sigset_t blocked;
4622 siginitset(&blocked, mask);
4623 return sigsuspend(&blocked);
4624}
4625#endif
4626#ifdef CONFIG_OLD_SIGSUSPEND3
4627SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4628{
4629 sigset_t blocked;
4630 siginitset(&blocked, mask);
4631 return sigsuspend(&blocked);
4632}
4633#endif
4634
4635__weak const char *arch_vma_name(struct vm_area_struct *vma)
4636{
4637 return NULL;
4638}
4639
4640static inline void siginfo_buildtime_checks(void)
4641{
4642 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4643
4644
4645#define CHECK_OFFSET(field) \
4646 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4647
4648
4649 CHECK_OFFSET(si_pid);
4650 CHECK_OFFSET(si_uid);
4651
4652
4653 CHECK_OFFSET(si_tid);
4654 CHECK_OFFSET(si_overrun);
4655 CHECK_OFFSET(si_value);
4656
4657
4658 CHECK_OFFSET(si_pid);
4659 CHECK_OFFSET(si_uid);
4660 CHECK_OFFSET(si_value);
4661
4662
4663 CHECK_OFFSET(si_pid);
4664 CHECK_OFFSET(si_uid);
4665 CHECK_OFFSET(si_status);
4666 CHECK_OFFSET(si_utime);
4667 CHECK_OFFSET(si_stime);
4668
4669
4670 CHECK_OFFSET(si_addr);
4671 CHECK_OFFSET(si_trapno);
4672 CHECK_OFFSET(si_addr_lsb);
4673 CHECK_OFFSET(si_lower);
4674 CHECK_OFFSET(si_upper);
4675 CHECK_OFFSET(si_pkey);
4676 CHECK_OFFSET(si_perf_data);
4677 CHECK_OFFSET(si_perf_type);
4678
4679
4680 CHECK_OFFSET(si_band);
4681 CHECK_OFFSET(si_fd);
4682
4683
4684 CHECK_OFFSET(si_call_addr);
4685 CHECK_OFFSET(si_syscall);
4686 CHECK_OFFSET(si_arch);
4687#undef CHECK_OFFSET
4688
4689
4690 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4691 offsetof(struct siginfo, si_addr));
4692 if (sizeof(int) == sizeof(void __user *)) {
4693 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4694 sizeof(void __user *));
4695 } else {
4696 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4697 sizeof_field(struct siginfo, si_uid)) !=
4698 sizeof(void __user *));
4699 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4700 offsetof(struct siginfo, si_uid));
4701 }
4702#ifdef CONFIG_COMPAT
4703 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4704 offsetof(struct compat_siginfo, si_addr));
4705 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4706 sizeof(compat_uptr_t));
4707 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4708 sizeof_field(struct siginfo, si_pid));
4709#endif
4710}
4711
4712void __init signals_init(void)
4713{
4714 siginfo_buildtime_checks();
4715
4716 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4717}
4718
4719#ifdef CONFIG_KGDB_KDB
4720#include <linux/kdb.h>
4721
4722/*
4723 * kdb_send_sig - Allows kdb to send signals without exposing
4724 * signal internals.  This function checks if the required locks are
4725 * available before calling the main signal code, to avoid kdb deadlocks.
4726 */
4727void kdb_send_sig(struct task_struct *t, int sig)
4728{
4729 static struct task_struct *kdb_prev_t;
4730 int new_t, ret;
4731 if (!spin_trylock(&t->sighand->siglock)) {
4732 kdb_printf("Can't do kill command now.\n"
4733 "The sigmask lock is held somewhere else in "
4734 "kernel, try again later\n");
4735 return;
4736 }
4737 new_t = kdb_prev_t != t;
4738 kdb_prev_t = t;
4739 if (!task_is_running(t) && new_t) {
4740 spin_unlock(&t->sighand->siglock);
4741 kdb_printf("Process is not RUNNING, sending a signal from "
4742 "kdb risks deadlock\n"
4743 "on the run queue locks. "
4744 "The signal has _not_ been sent.\n"
4745 "Reissue the kill command if you want to risk "
4746 "the deadlock.\n");
4747 return;
4748 }
4749 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4750 spin_unlock(&t->sighand->siglock);
4751 if (ret)
4752 kdb_printf("Failed to deliver signal %d to process %d.\n",
4753 sig, t->pid);
4754 else
4755 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4756}
4757#endif
4758