// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to make signals SMP safe.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
58
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
66
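/* Return the handler currently installed for @sig in task @t's sighand. */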
67static void __user *sig_handler(struct task_struct *t, int sig)
68{
69 return t->sighand->action[sig - 1].sa.sa_handler;
70}
71
72static inline bool sig_handler_ignored(void __user *handler, int sig)
73{
	/* Is it explicitly or implicitly ignored? */
75 return handler == SIG_IGN ||
76 (handler == SIG_DFL && sig_kernel_ignore(sig));
77}
78
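/*
 * Would @sig be ignored by @t if delivered now?  @force (signal sent by
 * the kernel or from an ancestor pid namespace) can override the
 * SIGNAL_UNKILLABLE and kthread special cases below.
 */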
79static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
80{
81 void __user *handler;
82
83 handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
86 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
87 return true;
88
89 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
90 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
91 return true;

	/* Only allow kernel generated signals to this kthread */
94 if (unlikely((t->flags & PF_KTHREAD) &&
95 (handler == SIG_KTHREAD_KERNEL) && !force))
96 return true;
97
98 return sig_handler_ignored(handler, sig);
99}
100
101static bool sig_ignored(struct task_struct *t, int sig, bool force)
102{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
108 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
109 return false;
110
	/*
	 * Tracers may want to know about even ignored signal unless it
	 * is SIGKILL which can't be reported anyway but can be ignored
	 * by SIGNAL_UNKILLABLE task.
	 */
116 if (t->ptrace && sig != SIGKILL)
117 return false;
118
119 return sig_task_ignored(t, sig, force);
120}
121
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
126static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
127{
128 unsigned long ready;
129 long i;
130
131 switch (_NSIG_WORDS) {
132 default:
133 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
134 ready |= signal->sig[i] &~ blocked->sig[i];
135 break;
136
137 case 4: ready = signal->sig[3] &~ blocked->sig[3];
138 ready |= signal->sig[2] &~ blocked->sig[2];
139 ready |= signal->sig[1] &~ blocked->sig[1];
140 ready |= signal->sig[0] &~ blocked->sig[0];
141 break;
142
143 case 2: ready = signal->sig[1] &~ blocked->sig[1];
144 ready |= signal->sig[0] &~ blocked->sig[0];
145 break;
146
147 case 1: ready = signal->sig[0] &~ blocked->sig[0];
148 }
149 return ready != 0;
150}
151
152#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
153
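/*
 * Re-check whether @t has any reason to enter signal handling: pending
 * job control or freezer work, an unblocked private or shared signal,
 * or a cgroup freeze.  Sets TIF_SIGPENDING and returns true if so.
 */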
154static bool recalc_sigpending_tsk(struct task_struct *t)
155{
156 if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
157 PENDING(&t->pending, &t->blocked) ||
158 PENDING(&t->signal->shared_pending, &t->blocked) ||
159 cgroup_task_frozen(t)) {
160 set_tsk_thread_flag(t, TIF_SIGPENDING);
161 return true;
162 }

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
169 return false;
170}
171
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous if there is no-one to wake up.
 */
176void recalc_sigpending_and_wake(struct task_struct *t)
177{
178 if (recalc_sigpending_tsk(t))
179 signal_wake_up(t, 0);
180}
181
182void recalc_sigpending(void)
183{
184 if (!recalc_sigpending_tsk(current) && !freezing(current))
185 clear_thread_flag(TIF_SIGPENDING);
186
187}
188EXPORT_SYMBOL(recalc_sigpending);
189
190void calculate_sigpending(void)
191{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
196 set_tsk_thread_flag(current, TIF_SIGPENDING);
197 recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
199}
200
/* Given the mask, find the first available signal that should be serviced. */

203#define SYNCHRONOUS_MASK \
204 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
205 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
206
207int next_signal(struct sigpending *pending, sigset_t *mask)
208{
209 unsigned long i, *s, *m, x;
210 int sig = 0;
211
212 s = pending->signal.sig;
213 m = mask->sig;
214
	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
219 x = *s &~ *m;
220 if (x) {
221 if (x & SYNCHRONOUS_MASK)
222 x &= SYNCHRONOUS_MASK;
223 sig = ffz(~x) + 1;
224 return sig;
225 }
226
227 switch (_NSIG_WORDS) {
228 default:
229 for (i = 1; i < _NSIG_WORDS; ++i) {
230 x = *++s &~ *++m;
231 if (!x)
232 continue;
233 sig = ffz(~x) + i*_NSIG_BPW + 1;
234 break;
235 }
236 break;
237
238 case 2:
239 x = s[1] &~ m[1];
240 if (!x)
241 break;
242 sig = ffz(~x) + _NSIG_BPW + 1;
243 break;
244
245 case 1:
246
247 break;
248 }
249
250 return sig;
251}
252
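/* Rate-limited note that a signal was dropped for hitting RLIMIT_SIGPENDING. */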
253static inline void print_dropped_signal(int sig)
254{
255 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
256
257 if (!print_fatal_signals)
258 return;
259
260 if (!__ratelimit(&ratelimit_state))
261 return;
262
263 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
264 current->comm, current->pid, sig);
265}
266
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
284bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
285{
286 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
287 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
288 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
289
290 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
291 return false;
292
293 if (mask & JOBCTL_STOP_SIGMASK)
294 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
295
296 task->jobctl |= mask;
297 return true;
298}
299
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
312void task_clear_jobctl_trapping(struct task_struct *task)
313{
314 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
315 task->jobctl &= ~JOBCTL_TRAPPING;
316 smp_mb();
317 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
318 }
319}
320
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
336void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
337{
338 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
339
340 if (mask & JOBCTL_STOP_PENDING)
341 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
342
343 task->jobctl &= ~mask;
344
345 if (!(task->jobctl & JOBCTL_PENDING_MASK))
346 task_clear_jobctl_trapping(task);
347}
348
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
365static bool task_participate_group_stop(struct task_struct *task)
366{
367 struct signal_struct *sig = task->signal;
368 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
369
370 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
371
372 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
373
374 if (!consume)
375 return false;
376
377 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
378 sig->group_stop_count--;
379
	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop().
	 */
384 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
385 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
386 return true;
387 }
388 return false;
389}
390
391void task_join_group_stop(struct task_struct *task)
392{
393 unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
394 struct signal_struct *sig = current->signal;
395
396 if (sig->group_stop_count) {
397 sig->group_stop_count++;
398 mask |= JOBCTL_STOP_CONSUME;
399 } else if (!(sig->flags & SIGNAL_STOP_STOPPED))
400 return;
401
402
403 task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
404}
405
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
411static struct sigqueue *
412__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
413 int override_rlimit, const unsigned int sigqueue_flags)
414{
415 struct sigqueue *q = NULL;
416 struct ucounts *ucounts = NULL;
417 long sigpending;
418
	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes.
	 */
427 rcu_read_lock();
428 ucounts = task_ucounts(t);
429 sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
430 rcu_read_unlock();
431 if (!sigpending)
432 return NULL;
433
434 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
435 q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
436 } else {
437 print_dropped_signal(sig);
438 }
439
440 if (unlikely(q == NULL)) {
441 dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
442 } else {
443 INIT_LIST_HEAD(&q->list);
444 q->flags = sigqueue_flags;
445 q->ucounts = ucounts;
446 }
447 return q;
448}
449
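/*
 * Free a queue entry: preallocated (timer) entries are kept; everything
 * else releases its RLIMIT_SIGPENDING charge and goes back to the cache.
 */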
450static void __sigqueue_free(struct sigqueue *q)
451{
452 if (q->flags & SIGQUEUE_PREALLOC)
453 return;
454 if (q->ucounts) {
455 dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
456 q->ucounts = NULL;
457 }
458 kmem_cache_free(sigqueue_cachep, q);
459}
460
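/* Drop every queued entry and clear the pending mask of @queue. */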
461void flush_sigqueue(struct sigpending *queue)
462{
463 struct sigqueue *q;
464
465 sigemptyset(&queue->signal);
466 while (!list_empty(&queue->list)) {
467 q = list_entry(queue->list.next, struct sigqueue , list);
468 list_del_init(&q->list);
469 __sigqueue_free(q);
470 }
471}
472
/*
 * Flush all pending signals for this kthread.
 */
476void flush_signals(struct task_struct *t)
477{
478 unsigned long flags;
479
480 spin_lock_irqsave(&t->sighand->siglock, flags);
481 clear_tsk_thread_flag(t, TIF_SIGPENDING);
482 flush_sigqueue(&t->pending);
483 flush_sigqueue(&t->signal->shared_pending);
484 spin_unlock_irqrestore(&t->sighand->siglock, flags);
485}
486EXPORT_SYMBOL(flush_signals);
487
488#ifdef CONFIG_POSIX_TIMERS
489static void __flush_itimer_signals(struct sigpending *pending)
490{
491 sigset_t signal, retain;
492 struct sigqueue *q, *n;
493
494 signal = pending->signal;
495 sigemptyset(&retain);
496
497 list_for_each_entry_safe(q, n, &pending->list, list) {
498 int sig = q->info.si_signo;
499
500 if (likely(q->info.si_code != SI_TIMER)) {
501 sigaddset(&retain, sig);
502 } else {
503 sigdelset(&signal, sig);
504 list_del_init(&q->list);
505 __sigqueue_free(q);
506 }
507 }
508
509 sigorsets(&pending->signal, &signal, &retain);
510}
511
512void flush_itimer_signals(void)
513{
514 struct task_struct *tsk = current;
515 unsigned long flags;
516
517 spin_lock_irqsave(&tsk->sighand->siglock, flags);
518 __flush_itimer_signals(&tsk->pending);
519 __flush_itimer_signals(&tsk->signal->shared_pending);
520 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
521}
522#endif
523
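/*
 * Used by kernel threads: set every action to SIG_IGN and flush
 * anything already queued.
 */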
524void ignore_signals(struct task_struct *t)
525{
526 int i;
527
528 for (i = 0; i < _NSIG; ++i)
529 t->sighand->action[i].sa.sa_handler = SIG_IGN;
530
531 flush_signals(t);
532}
533
/*
 * Flush all handlers for a task.
 */

538void
539flush_signal_handlers(struct task_struct *t, int force_default)
540{
541 int i;
542 struct k_sigaction *ka = &t->sighand->action[0];
543 for (i = _NSIG ; i != 0 ; i--) {
544 if (force_default || ka->sa.sa_handler != SIG_IGN)
545 ka->sa.sa_handler = SIG_DFL;
546 ka->sa.sa_flags = 0;
547#ifdef __ARCH_HAS_SA_RESTORER
548 ka->sa.sa_restorer = NULL;
549#endif
550 sigemptyset(&ka->sa.sa_mask);
551 ka++;
552 }
553}
554
555bool unhandled_signal(struct task_struct *tsk, int sig)
556{
557 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
558 if (is_global_init(tsk))
559 return true;
560
561 if (handler != SIG_IGN && handler != SIG_DFL)
562 return false;
563
	/* if ptraced, let the tracer determine */
565 return !tsk->ptrace;
566}
567
568static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
569 bool *resched_timer)
570{
571 struct sigqueue *q, *first = NULL;
572
	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
577 list_for_each_entry(q, &list->list, list) {
578 if (q->info.si_signo == sig) {
579 if (first)
580 goto still_pending;
581 first = q;
582 }
583 }
584
585 sigdelset(&list->signal, sig);
586
587 if (first) {
588still_pending:
589 list_del_init(&first->list);
590 copy_siginfo(info, &first->info);
591
592 *resched_timer =
593 (first->flags & SIGQUEUE_PREALLOC) &&
594 (info->si_code == SI_TIMER) &&
595 (info->si_sys_private);
596
597 __sigqueue_free(first);
598 } else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
604 clear_siginfo(info);
605 info->si_signo = sig;
606 info->si_errno = 0;
607 info->si_code = SI_USER;
608 info->si_pid = 0;
609 info->si_uid = 0;
610 }
611}
612
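/* Dequeue the first signal in @pending that is not blocked by @mask. */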
613static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
614 kernel_siginfo_t *info, bool *resched_timer)
615{
616 int sig = next_signal(pending, mask);
617
618 if (sig)
619 collect_signal(sig, pending, info, resched_timer);
620 return sig;
621}
622
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
629int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
630{
631 bool resched_timer = false;
632 int signr;
633
	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
637 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
638 if (!signr) {
639 signr = __dequeue_signal(&tsk->signal->shared_pending,
640 mask, info, &resched_timer);
641#ifdef CONFIG_POSIX_TIMERS
		/*
		 * Interval timer (SIGALRM) signal?
		 *
		 * Periodic itimers are process wide and are rearmed here,
		 * in the dequeue path rather than at delivery time, so a
		 * high-frequency timer cannot flood the shared pending
		 * queue with SIGALRM instances.
		 */
655 if (unlikely(signr == SIGALRM)) {
656 struct hrtimer *tmr = &tsk->signal->real_timer;
657
658 if (!hrtimer_is_queued(tmr) &&
659 tsk->signal->it_real_incr != 0) {
660 hrtimer_forward(tmr, tmr->base->get_time(),
661 tsk->signal->it_real_incr);
662 hrtimer_restart(tmr);
663 }
664 }
665#endif
666 }
667
668 recalc_sigpending();
669 if (!signr)
670 return 0;
671
672 if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear the
		 * shared flag; JOBCTL_STOP_DEQUEUED tells do_signal_stop()
		 * that the stop it is processing is still valid.
		 */
685 current->jobctl |= JOBCTL_STOP_DEQUEUED;
686 }
687#ifdef CONFIG_POSIX_TIMERS
688 if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
695 spin_unlock(&tsk->sighand->siglock);
696 posixtimer_rearm(info);
697 spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
700 info->si_sys_private = 0;
701 }
702#endif
703 return signr;
704}
705EXPORT_SYMBOL_GPL(dequeue_signal);
706
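/*
 * Scan the private pending list for an unblocked synchronous signal
 * (see SYNCHRONOUS_MASK) so faults are reported ahead of other signals.
 */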
707static int dequeue_synchronous_signal(kernel_siginfo_t *info)
708{
709 struct task_struct *tsk = current;
710 struct sigpending *pending = &tsk->pending;
711 struct sigqueue *q, *sync = NULL;
712
	/*
	 * Might a synchronous signal be in the queue?
	 */
716 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
717 return 0;
718
	/*
	 * Return the first synchronous signal in the queue.
	 */
722 list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
724 if ((q->info.si_code > SI_USER) &&
725 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
726 sync = q;
727 goto next;
728 }
729 }
730 return 0;
731next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
735 list_for_each_entry_continue(q, &pending->list, list) {
736 if (q->info.si_signo == sync->info.si_signo)
737 goto still_pending;
738 }
739
740 sigdelset(&pending->signal, sync->info.si_signo);
741 recalc_sigpending();
742still_pending:
743 list_del_init(&sync->list);
744 copy_siginfo(info, &sync->info);
745 __sigqueue_free(sync);
746 return info->si_signo;
747}
748
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
760void signal_wake_up_state(struct task_struct *t, unsigned int state)
761{
762 set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
770 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
771 kick_process(t);
772}
773
/*
 * Remove signals in mask from the pending set and queue.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
780static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
781{
782 struct sigqueue *q, *n;
783 sigset_t m;
784
785 sigandsets(&m, mask, &s->signal);
786 if (sigisemptyset(&m))
787 return;
788
789 sigandnsets(&s->signal, &s->signal, mask);
790 list_for_each_entry_safe(q, n, &s->list, list) {
791 if (sigismember(mask, q->info.si_signo)) {
792 list_del_init(&q->list);
793 __sigqueue_free(q);
794 }
795 }
796}
797
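/* SEND_SIG_NOINFO and SEND_SIG_PRIV are special cookie values, not real
 * siginfo pointers. */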
798static inline int is_si_special(const struct kernel_siginfo *info)
799{
800 return info <= SEND_SIG_PRIV;
801}
802
803static inline bool si_fromuser(const struct kernel_siginfo *info)
804{
805 return info == SEND_SIG_NOINFO ||
806 (!is_si_special(info) && SI_FROMUSER(info));
807}
808
/*
 * called with RCU read lock from check_kill_permission()
 */
812static bool kill_ok_by_cred(struct task_struct *t)
813{
814 const struct cred *cred = current_cred();
815 const struct cred *tcred = __task_cred(t);
816
817 return uid_eq(cred->euid, tcred->suid) ||
818 uid_eq(cred->euid, tcred->uid) ||
819 uid_eq(cred->uid, tcred->suid) ||
820 uid_eq(cred->uid, tcred->uid) ||
821 ns_capable(tcred->user_ns, CAP_KILL);
822}
823
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
828static int check_kill_permission(int sig, struct kernel_siginfo *info,
829 struct task_struct *t)
830{
831 struct pid *sid;
832 int error;
833
834 if (!valid_signal(sig))
835 return -EINVAL;
836
837 if (!si_fromuser(info))
838 return 0;
839
840 error = audit_signal_info(sig, t);
841 if (error)
842 return error;
843
844 if (!same_thread_group(current, t) &&
845 !kill_ok_by_cred(t)) {
846 switch (sig) {
847 case SIGCONT:
848 sid = task_session(t);
			/*
			 * A SIGCONT within the caller's own session is
			 * allowed even when the credential checks above
			 * failed.
			 */
853 if (!sid || sid == task_session(current))
854 break;
855 fallthrough;
856 default:
857 return -EPERM;
858 }
859 }
860
861 return security_task_kill(t, info, sig, NULL);
862}
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881static void ptrace_trap_notify(struct task_struct *t)
882{
883 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
884 assert_spin_locked(&t->sighand->siglock);
885
886 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
887 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
888}
889
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
900static bool prepare_signal(int sig, struct task_struct *p, bool force)
901{
902 struct signal_struct *signal = p->signal;
903 struct task_struct *t;
904 sigset_t flush;
905
906 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
907 if (!(signal->flags & SIGNAL_GROUP_EXIT))
908 return sig == SIGKILL;
909
910
911
912 } else if (sig_kernel_stop(sig)) {
913
914
915
916 siginitset(&flush, sigmask(SIGCONT));
917 flush_sigqueue_mask(&flush, &signal->shared_pending);
918 for_each_thread(p, t)
919 flush_sigqueue_mask(&flush, &t->pending);
920 } else if (sig == SIGCONT) {
921 unsigned int why;
922
923
924
925 siginitset(&flush, SIG_KERNEL_STOP_MASK);
926 flush_sigqueue_mask(&flush, &signal->shared_pending);
927 for_each_thread(p, t) {
928 flush_sigqueue_mask(&flush, &t->pending);
929 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
930 if (likely(!(t->ptrace & PT_SEIZED)))
931 wake_up_state(t, __TASK_STOPPED);
932 else
933 ptrace_trap_notify(t);
934 }
935
936
937
938
939
940
941
942
943
944 why = 0;
945 if (signal->flags & SIGNAL_STOP_STOPPED)
946 why |= SIGNAL_CLD_CONTINUED;
947 else if (signal->group_stop_count)
948 why |= SIGNAL_CLD_STOPPED;
949
950 if (why) {
951
952
953
954
955
956 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
957 signal->group_stop_count = 0;
958 signal->group_exit_code = 0;
959 }
960 }
961
962 return !sig_ignored(p, sig, force);
963}
964
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
973static inline bool wants_signal(int sig, struct task_struct *p)
974{
975 if (sigismember(&p->blocked, sig))
976 return false;
977
978 if (p->flags & PF_EXITING)
979 return false;
980
981 if (sig == SIGKILL)
982 return true;
983
984 if (task_is_stopped_or_traced(p))
985 return false;
986
987 return task_curr(p) || !task_sigpending(p);
988}
989
990static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
991{
992 struct signal_struct *signal = p->signal;
993 struct task_struct *t;
994
995
996
997
998
999
1000
1001 if (wants_signal(sig, p))
1002 t = p;
1003 else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1004
1005
1006
1007
1008 return;
1009 else {
1010
1011
1012
1013 t = signal->curr_target;
1014 while (!wants_signal(sig, t)) {
1015 t = next_thread(t);
1016 if (t == signal->curr_target)
1017
1018
1019
1020
1021
1022 return;
1023 }
1024 signal->curr_target = t;
1025 }
1026
1027
1028
1029
1030
1031 if (sig_fatal(p, sig) &&
1032 !(signal->flags & SIGNAL_GROUP_EXIT) &&
1033 !sigismember(&t->real_blocked, sig) &&
1034 (sig == SIGKILL || !p->ptrace)) {
1035
1036
1037
1038 if (!sig_kernel_coredump(sig)) {
1039
1040
1041
1042
1043
1044
1045 signal->flags = SIGNAL_GROUP_EXIT;
1046 signal->group_exit_code = sig;
1047 signal->group_stop_count = 0;
1048 t = p;
1049 do {
1050 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1051 sigaddset(&t->pending.signal, SIGKILL);
1052 signal_wake_up(t, 1);
1053 } while_each_thread(p, t);
1054 return;
1055 }
1056 }
1057
1058
1059
1060
1061
1062 signal_wake_up(t, sig == SIGKILL);
1063 return;
1064}
1065
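/* Legacy (non-RT) signals coalesce: at most one instance may be pending. */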
1066static inline bool legacy_queue(struct sigpending *signals, int sig)
1067{
1068 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1069}
1070
1071static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1072 enum pid_type type, bool force)
1073{
1074 struct sigpending *pending;
1075 struct sigqueue *q;
1076 int override_rlimit;
1077 int ret = 0, result;
1078
1079 assert_spin_locked(&t->sighand->siglock);
1080
1081 result = TRACE_SIGNAL_IGNORED;
1082 if (!prepare_signal(sig, t, force))
1083 goto ret;
1084
1085 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1086
1087
1088
1089
1090
1091 result = TRACE_SIGNAL_ALREADY_PENDING;
1092 if (legacy_queue(pending, sig))
1093 goto ret;
1094
1095 result = TRACE_SIGNAL_DELIVERED;
1096
1097
1098
1099 if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1100 goto out_set;
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111 if (sig < SIGRTMIN)
1112 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1113 else
1114 override_rlimit = 0;
1115
1116 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1117
1118 if (q) {
1119 list_add_tail(&q->list, &pending->list);
1120 switch ((unsigned long) info) {
1121 case (unsigned long) SEND_SIG_NOINFO:
1122 clear_siginfo(&q->info);
1123 q->info.si_signo = sig;
1124 q->info.si_errno = 0;
1125 q->info.si_code = SI_USER;
1126 q->info.si_pid = task_tgid_nr_ns(current,
1127 task_active_pid_ns(t));
1128 rcu_read_lock();
1129 q->info.si_uid =
1130 from_kuid_munged(task_cred_xxx(t, user_ns),
1131 current_uid());
1132 rcu_read_unlock();
1133 break;
1134 case (unsigned long) SEND_SIG_PRIV:
1135 clear_siginfo(&q->info);
1136 q->info.si_signo = sig;
1137 q->info.si_errno = 0;
1138 q->info.si_code = SI_KERNEL;
1139 q->info.si_pid = 0;
1140 q->info.si_uid = 0;
1141 break;
1142 default:
1143 copy_siginfo(&q->info, info);
1144 break;
1145 }
1146 } else if (!is_si_special(info) &&
1147 sig >= SIGRTMIN && info->si_code != SI_USER) {
1148
1149
1150
1151
1152
1153 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1154 ret = -EAGAIN;
1155 goto ret;
1156 } else {
1157
1158
1159
1160
1161 result = TRACE_SIGNAL_LOSE_INFO;
1162 }
1163
1164out_set:
1165 signalfd_notify(t, sig);
1166 sigaddset(&pending->signal, sig);
1167
1168
1169 if (type > PIDTYPE_TGID) {
1170 struct multiprocess_signals *delayed;
1171 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1172 sigset_t *signal = &delayed->signal;
1173
1174 if (sig == SIGCONT)
1175 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1176 else if (sig_kernel_stop(sig))
1177 sigdelset(signal, SIGCONT);
1178 sigaddset(signal, sig);
1179 }
1180 }
1181
1182 complete_signal(sig, t, type);
1183ret:
1184 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1185 return ret;
1186}
1187
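/* Does this siginfo layout carry si_pid/si_uid that may need namespace
 * translation before queueing? */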
1188static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1189{
1190 bool ret = false;
1191 switch (siginfo_layout(info->si_signo, info->si_code)) {
1192 case SIL_KILL:
1193 case SIL_CHLD:
1194 case SIL_RT:
1195 ret = true;
1196 break;
1197 case SIL_TIMER:
1198 case SIL_POLL:
1199 case SIL_FAULT:
1200 case SIL_FAULT_TRAPNO:
1201 case SIL_FAULT_MCEERR:
1202 case SIL_FAULT_BNDERR:
1203 case SIL_FAULT_PKUERR:
1204 case SIL_FAULT_PERF_EVENT:
1205 case SIL_SYS:
1206 ret = false;
1207 break;
1208 }
1209 return ret;
1210}
1211
1212static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1213 enum pid_type type)
1214{
1215
1216 bool force = false;
1217
1218 if (info == SEND_SIG_NOINFO) {
1219
1220 force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1221 } else if (info == SEND_SIG_PRIV) {
1222
1223 force = true;
1224 } else if (has_si_pid_and_uid(info)) {
1225
1226 struct user_namespace *t_user_ns;
1227
1228 rcu_read_lock();
1229 t_user_ns = task_cred_xxx(t, user_ns);
1230 if (current_user_ns() != t_user_ns) {
1231 kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1232 info->si_uid = from_kuid_munged(t_user_ns, uid);
1233 }
1234 rcu_read_unlock();
1235
1236
1237 force = (info->si_code == SI_KERNEL);
1238
1239
1240 if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1241 info->si_pid = 0;
1242 force = true;
1243 }
1244 }
1245 return __send_signal(sig, info, t, type, force);
1246}
1247
1248static void print_fatal_signal(int signr)
1249{
1250 struct pt_regs *regs = signal_pt_regs();
1251 pr_info("potentially unexpected fatal signal %d.\n", signr);
1252
1253#if defined(__i386__) && !defined(__arch_um__)
1254 pr_info("code at %08lx: ", regs->ip);
1255 {
1256 int i;
1257 for (i = 0; i < 16; i++) {
1258 unsigned char insn;
1259
1260 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1261 break;
1262 pr_cont("%02x ", insn);
1263 }
1264 }
1265 pr_cont("\n");
1266#endif
1267 preempt_disable();
1268 show_regs(regs);
1269 preempt_enable();
1270}
1271
1272static int __init setup_print_fatal_signals(char *str)
1273{
1274 get_option (&str, &print_fatal_signals);
1275
1276 return 1;
1277}
1278
1279__setup("print-fatal-signals=", setup_print_fatal_signals);
1280
1281int
1282__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1283{
1284 return send_signal(sig, info, p, PIDTYPE_TGID);
1285}
1286
1287int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1288 enum pid_type type)
1289{
1290 unsigned long flags;
1291 int ret = -ESRCH;
1292
1293 if (lock_task_sighand(p, &flags)) {
1294 ret = send_signal(sig, info, p, type);
1295 unlock_task_sighand(p, &flags);
1296 }
1297
1298 return ret;
1299}
1300
1301enum sig_handler {
1302 HANDLER_CURRENT,
1303 HANDLER_SIG_DFL,
1304 HANDLER_EXIT,
1305};
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318static int
1319force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
1320 enum sig_handler handler)
1321{
1322 unsigned long int flags;
1323 int ret, blocked, ignored;
1324 struct k_sigaction *action;
1325 int sig = info->si_signo;
1326
1327 spin_lock_irqsave(&t->sighand->siglock, flags);
1328 action = &t->sighand->action[sig-1];
1329 ignored = action->sa.sa_handler == SIG_IGN;
1330 blocked = sigismember(&t->blocked, sig);
1331 if (blocked || ignored || (handler != HANDLER_CURRENT)) {
1332 action->sa.sa_handler = SIG_DFL;
1333 if (handler == HANDLER_EXIT)
1334 action->sa.sa_flags |= SA_IMMUTABLE;
1335 if (blocked) {
1336 sigdelset(&t->blocked, sig);
1337 recalc_sigpending_and_wake(t);
1338 }
1339 }
1340
1341
1342
1343
1344 if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1345 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1346 ret = send_signal(sig, info, t, PIDTYPE_PID);
1347 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1348
1349 return ret;
1350}
1351
1352int force_sig_info(struct kernel_siginfo *info)
1353{
1354 return force_sig_info_to_task(info, current, HANDLER_CURRENT);
1355}
1356
1357
1358
1359
1360int zap_other_threads(struct task_struct *p)
1361{
1362 struct task_struct *t = p;
1363 int count = 0;
1364
1365 p->signal->group_stop_count = 0;
1366
1367 while_each_thread(p, t) {
1368 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1369 count++;
1370
1371
1372 if (t->exit_state)
1373 continue;
1374 sigaddset(&t->pending.signal, SIGKILL);
1375 signal_wake_up(t, 1);
1376 }
1377
1378 return count;
1379}
1380
1381struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1382 unsigned long *flags)
1383{
1384 struct sighand_struct *sighand;
1385
1386 rcu_read_lock();
1387 for (;;) {
1388 sighand = rcu_dereference(tsk->sighand);
1389 if (unlikely(sighand == NULL))
1390 break;
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403 spin_lock_irqsave(&sighand->siglock, *flags);
1404 if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1405 break;
1406 spin_unlock_irqrestore(&sighand->siglock, *flags);
1407 }
1408 rcu_read_unlock();
1409
1410 return sighand;
1411}
1412
1413#ifdef CONFIG_LOCKDEP
1414void lockdep_assert_task_sighand_held(struct task_struct *task)
1415{
1416 struct sighand_struct *sighand;
1417
1418 rcu_read_lock();
1419 sighand = rcu_dereference(task->sighand);
1420 if (sighand)
1421 lockdep_assert_held(&sighand->siglock);
1422 else
1423 WARN_ON_ONCE(1);
1424 rcu_read_unlock();
1425}
1426#endif
1427
1428
1429
1430
1431int group_send_sig_info(int sig, struct kernel_siginfo *info,
1432 struct task_struct *p, enum pid_type type)
1433{
1434 int ret;
1435
1436 rcu_read_lock();
1437 ret = check_kill_permission(sig, info, p);
1438 rcu_read_unlock();
1439
1440 if (!ret && sig)
1441 ret = do_send_sig_info(sig, info, p, type);
1442
1443 return ret;
1444}
1445
1446
1447
1448
1449
1450
1451int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1452{
1453 struct task_struct *p = NULL;
1454 int retval, success;
1455
1456 success = 0;
1457 retval = -ESRCH;
1458 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1459 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1460 success |= !err;
1461 retval = err;
1462 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1463 return success ? 0 : retval;
1464}
1465
1466int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1467{
1468 int error = -ESRCH;
1469 struct task_struct *p;
1470
1471 for (;;) {
1472 rcu_read_lock();
1473 p = pid_task(pid, PIDTYPE_PID);
1474 if (p)
1475 error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1476 rcu_read_unlock();
1477 if (likely(!p || error != -ESRCH))
1478 return error;
1479
1480
1481
1482
1483
1484
1485 }
1486}
1487
1488static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1489{
1490 int error;
1491 rcu_read_lock();
1492 error = kill_pid_info(sig, info, find_vpid(pid));
1493 rcu_read_unlock();
1494 return error;
1495}
1496
1497static inline bool kill_as_cred_perm(const struct cred *cred,
1498 struct task_struct *target)
1499{
1500 const struct cred *pcred = __task_cred(target);
1501
1502 return uid_eq(cred->euid, pcred->suid) ||
1503 uid_eq(cred->euid, pcred->uid) ||
1504 uid_eq(cred->uid, pcred->suid) ||
1505 uid_eq(cred->uid, pcred->uid);
1506}
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1534 struct pid *pid, const struct cred *cred)
1535{
1536 struct kernel_siginfo info;
1537 struct task_struct *p;
1538 unsigned long flags;
1539 int ret = -EINVAL;
1540
1541 if (!valid_signal(sig))
1542 return ret;
1543
1544 clear_siginfo(&info);
1545 info.si_signo = sig;
1546 info.si_errno = errno;
1547 info.si_code = SI_ASYNCIO;
1548 *((sigval_t *)&info.si_pid) = addr;
1549
1550 rcu_read_lock();
1551 p = pid_task(pid, PIDTYPE_PID);
1552 if (!p) {
1553 ret = -ESRCH;
1554 goto out_unlock;
1555 }
1556 if (!kill_as_cred_perm(cred, p)) {
1557 ret = -EPERM;
1558 goto out_unlock;
1559 }
1560 ret = security_task_kill(p, &info, sig, cred);
1561 if (ret)
1562 goto out_unlock;
1563
1564 if (sig) {
1565 if (lock_task_sighand(p, &flags)) {
1566 ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
1567 unlock_task_sighand(p, &flags);
1568 } else
1569 ret = -ESRCH;
1570 }
1571out_unlock:
1572 rcu_read_unlock();
1573 return ret;
1574}
1575EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1576
1577
1578
1579
1580
1581
1582
1583
1584static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1585{
1586 int ret;
1587
1588 if (pid > 0)
1589 return kill_proc_info(sig, info, pid);
1590
1591
1592 if (pid == INT_MIN)
1593 return -ESRCH;
1594
1595 read_lock(&tasklist_lock);
1596 if (pid != -1) {
1597 ret = __kill_pgrp_info(sig, info,
1598 pid ? find_vpid(-pid) : task_pgrp(current));
1599 } else {
1600 int retval = 0, count = 0;
1601 struct task_struct * p;
1602
1603 for_each_process(p) {
1604 if (task_pid_vnr(p) > 1 &&
1605 !same_thread_group(p, current)) {
1606 int err = group_send_sig_info(sig, info, p,
1607 PIDTYPE_MAX);
1608 ++count;
1609 if (err != -EPERM)
1610 retval = err;
1611 }
1612 }
1613 ret = count ? retval : -ESRCH;
1614 }
1615 read_unlock(&tasklist_lock);
1616
1617 return ret;
1618}
1619
1620
1621
1622
1623
1624int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1625{
1626
1627
1628
1629
1630 if (!valid_signal(sig))
1631 return -EINVAL;
1632
1633 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1634}
1635EXPORT_SYMBOL(send_sig_info);
1636
1637#define __si_special(priv) \
1638 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1639
1640int
1641send_sig(int sig, struct task_struct *p, int priv)
1642{
1643 return send_sig_info(sig, __si_special(priv), p);
1644}
1645EXPORT_SYMBOL(send_sig);
1646
1647void force_sig(int sig)
1648{
1649 struct kernel_siginfo info;
1650
1651 clear_siginfo(&info);
1652 info.si_signo = sig;
1653 info.si_errno = 0;
1654 info.si_code = SI_KERNEL;
1655 info.si_pid = 0;
1656 info.si_uid = 0;
1657 force_sig_info(&info);
1658}
1659EXPORT_SYMBOL(force_sig);
1660
1661void force_fatal_sig(int sig)
1662{
1663 struct kernel_siginfo info;
1664
1665 clear_siginfo(&info);
1666 info.si_signo = sig;
1667 info.si_errno = 0;
1668 info.si_code = SI_KERNEL;
1669 info.si_pid = 0;
1670 info.si_uid = 0;
1671 force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
1672}
1673
1674void force_exit_sig(int sig)
1675{
1676 struct kernel_siginfo info;
1677
1678 clear_siginfo(&info);
1679 info.si_signo = sig;
1680 info.si_errno = 0;
1681 info.si_code = SI_KERNEL;
1682 info.si_pid = 0;
1683 info.si_uid = 0;
1684 force_sig_info_to_task(&info, current, HANDLER_EXIT);
1685}
1686
1687
1688
1689
1690
1691
1692
1693void force_sigsegv(int sig)
1694{
1695 if (sig == SIGSEGV)
1696 force_fatal_sig(SIGSEGV);
1697 else
1698 force_sig(SIGSEGV);
1699}
1700
1701int force_sig_fault_to_task(int sig, int code, void __user *addr
1702 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1703 , struct task_struct *t)
1704{
1705 struct kernel_siginfo info;
1706
1707 clear_siginfo(&info);
1708 info.si_signo = sig;
1709 info.si_errno = 0;
1710 info.si_code = code;
1711 info.si_addr = addr;
1712#ifdef __ia64__
1713 info.si_imm = imm;
1714 info.si_flags = flags;
1715 info.si_isr = isr;
1716#endif
1717 return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
1718}
1719
1720int force_sig_fault(int sig, int code, void __user *addr
1721 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1722{
1723 return force_sig_fault_to_task(sig, code, addr
1724 ___ARCH_SI_IA64(imm, flags, isr), current);
1725}
1726
1727int send_sig_fault(int sig, int code, void __user *addr
1728 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1729 , struct task_struct *t)
1730{
1731 struct kernel_siginfo info;
1732
1733 clear_siginfo(&info);
1734 info.si_signo = sig;
1735 info.si_errno = 0;
1736 info.si_code = code;
1737 info.si_addr = addr;
1738#ifdef __ia64__
1739 info.si_imm = imm;
1740 info.si_flags = flags;
1741 info.si_isr = isr;
1742#endif
1743 return send_sig_info(info.si_signo, &info, t);
1744}
1745
1746int force_sig_mceerr(int code, void __user *addr, short lsb)
1747{
1748 struct kernel_siginfo info;
1749
1750 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1751 clear_siginfo(&info);
1752 info.si_signo = SIGBUS;
1753 info.si_errno = 0;
1754 info.si_code = code;
1755 info.si_addr = addr;
1756 info.si_addr_lsb = lsb;
1757 return force_sig_info(&info);
1758}
1759
1760int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1761{
1762 struct kernel_siginfo info;
1763
1764 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1765 clear_siginfo(&info);
1766 info.si_signo = SIGBUS;
1767 info.si_errno = 0;
1768 info.si_code = code;
1769 info.si_addr = addr;
1770 info.si_addr_lsb = lsb;
1771 return send_sig_info(info.si_signo, &info, t);
1772}
1773EXPORT_SYMBOL(send_sig_mceerr);
1774
1775int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1776{
1777 struct kernel_siginfo info;
1778
1779 clear_siginfo(&info);
1780 info.si_signo = SIGSEGV;
1781 info.si_errno = 0;
1782 info.si_code = SEGV_BNDERR;
1783 info.si_addr = addr;
1784 info.si_lower = lower;
1785 info.si_upper = upper;
1786 return force_sig_info(&info);
1787}
1788
1789#ifdef SEGV_PKUERR
1790int force_sig_pkuerr(void __user *addr, u32 pkey)
1791{
1792 struct kernel_siginfo info;
1793
1794 clear_siginfo(&info);
1795 info.si_signo = SIGSEGV;
1796 info.si_errno = 0;
1797 info.si_code = SEGV_PKUERR;
1798 info.si_addr = addr;
1799 info.si_pkey = pkey;
1800 return force_sig_info(&info);
1801}
1802#endif
1803
1804int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
1805{
1806 struct kernel_siginfo info;
1807
1808 clear_siginfo(&info);
1809 info.si_signo = SIGTRAP;
1810 info.si_errno = 0;
1811 info.si_code = TRAP_PERF;
1812 info.si_addr = addr;
1813 info.si_perf_data = sig_data;
1814 info.si_perf_type = type;
1815
1816 return force_sig_info(&info);
1817}
1818
1819
1820
1821
1822
1823
1824
1825
1826int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1827{
1828 struct kernel_siginfo info;
1829
1830 clear_siginfo(&info);
1831 info.si_signo = SIGSYS;
1832 info.si_code = SYS_SECCOMP;
1833 info.si_call_addr = (void __user *)KSTK_EIP(current);
1834 info.si_errno = reason;
1835 info.si_arch = syscall_get_arch(current);
1836 info.si_syscall = syscall;
1837 return force_sig_info_to_task(&info, current,
1838 force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1839}
1840
1841
1842
1843
1844int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1845{
1846 struct kernel_siginfo info;
1847
1848 clear_siginfo(&info);
1849 info.si_signo = SIGTRAP;
1850 info.si_errno = errno;
1851 info.si_code = TRAP_HWBKPT;
1852 info.si_addr = addr;
1853 return force_sig_info(&info);
1854}
1855
1856
1857
1858
1859int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1860{
1861 struct kernel_siginfo info;
1862
1863 clear_siginfo(&info);
1864 info.si_signo = sig;
1865 info.si_errno = 0;
1866 info.si_code = code;
1867 info.si_addr = addr;
1868 info.si_trapno = trapno;
1869 return force_sig_info(&info);
1870}
1871
1872
1873
1874
1875int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1876 struct task_struct *t)
1877{
1878 struct kernel_siginfo info;
1879
1880 clear_siginfo(&info);
1881 info.si_signo = sig;
1882 info.si_errno = 0;
1883 info.si_code = code;
1884 info.si_addr = addr;
1885 info.si_trapno = trapno;
1886 return send_sig_info(info.si_signo, &info, t);
1887}
1888
1889int kill_pgrp(struct pid *pid, int sig, int priv)
1890{
1891 int ret;
1892
1893 read_lock(&tasklist_lock);
1894 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1895 read_unlock(&tasklist_lock);
1896
1897 return ret;
1898}
1899EXPORT_SYMBOL(kill_pgrp);
1900
1901int kill_pid(struct pid *pid, int sig, int priv)
1902{
1903 return kill_pid_info(sig, __si_special(priv), pid);
1904}
1905EXPORT_SYMBOL(kill_pid);
1906
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
1916struct sigqueue *sigqueue_alloc(void)
1917{
1918 return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1919}
1920
1921void sigqueue_free(struct sigqueue *q)
1922{
1923 unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;
1925
1926 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1927
1928
1929
1930
1931
1932 spin_lock_irqsave(lock, flags);
1933 q->flags &= ~SIGQUEUE_PREALLOC;
1934
1935
1936
1937
1938 if (!list_empty(&q->list))
1939 q = NULL;
1940 spin_unlock_irqrestore(lock, flags);
1941
1942 if (q)
1943 __sigqueue_free(q);
1944}
1945
1946int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1947{
1948 int sig = q->info.si_signo;
1949 struct sigpending *pending;
1950 struct task_struct *t;
1951 unsigned long flags;
1952 int ret, result;
1953
1954 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1955
1956 ret = -1;
1957 rcu_read_lock();
1958 t = pid_task(pid, type);
1959 if (!t || !likely(lock_task_sighand(t, &flags)))
1960 goto ret;
1961
1962 ret = 1;
1963 result = TRACE_SIGNAL_IGNORED;
1964 if (!prepare_signal(sig, t, false))
1965 goto out;
1966
1967 ret = 0;
1968 if (unlikely(!list_empty(&q->list))) {
1969
1970
1971
1972
1973 BUG_ON(q->info.si_code != SI_TIMER);
1974 q->info.si_overrun++;
1975 result = TRACE_SIGNAL_ALREADY_PENDING;
1976 goto out;
1977 }
1978 q->info.si_overrun = 0;
1979
1980 signalfd_notify(t, sig);
1981 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1982 list_add_tail(&q->list, &pending->list);
1983 sigaddset(&pending->signal, sig);
1984 complete_signal(sig, t, type);
1985 result = TRACE_SIGNAL_DELIVERED;
1986out:
1987 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1988 unlock_task_sighand(t, &flags);
1989ret:
1990 rcu_read_unlock();
1991 return ret;
1992}
1993
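/* Wake up anyone polling a pidfd for this (exiting) task. */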
1994static void do_notify_pidfd(struct task_struct *task)
1995{
1996 struct pid *pid;
1997
1998 WARN_ON(task->exit_state == 0);
1999 pid = task_pid(task);
2000 wake_up_all(&pid->wait_pidfd);
2001}
2002
2003
2004
2005
2006
2007
2008
2009
2010bool do_notify_parent(struct task_struct *tsk, int sig)
2011{
2012 struct kernel_siginfo info;
2013 unsigned long flags;
2014 struct sighand_struct *psig;
2015 bool autoreap = false;
2016 u64 utime, stime;
2017
2018 BUG_ON(sig == -1);
2019
2020
2021 BUG_ON(task_is_stopped_or_traced(tsk));
2022
2023 BUG_ON(!tsk->ptrace &&
2024 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2025
2026
2027 do_notify_pidfd(tsk);
2028
2029 if (sig != SIGCHLD) {
2030
2031
2032
2033
2034 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2035 sig = SIGCHLD;
2036 }
2037
2038 clear_siginfo(&info);
2039 info.si_signo = sig;
2040 info.si_errno = 0;
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052 rcu_read_lock();
2053 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2054 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2055 task_uid(tsk));
2056 rcu_read_unlock();
2057
2058 task_cputime(tsk, &utime, &stime);
2059 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2060 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2061
2062 info.si_status = tsk->exit_code & 0x7f;
2063 if (tsk->exit_code & 0x80)
2064 info.si_code = CLD_DUMPED;
2065 else if (tsk->exit_code & 0x7f)
2066 info.si_code = CLD_KILLED;
2067 else {
2068 info.si_code = CLD_EXITED;
2069 info.si_status = tsk->exit_code >> 8;
2070 }
2071
2072 psig = tsk->parent->sighand;
2073 spin_lock_irqsave(&psig->siglock, flags);
2074 if (!tsk->ptrace && sig == SIGCHLD &&
2075 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2076 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092 autoreap = true;
2093 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2094 sig = 0;
2095 }
2096
2097
2098
2099
2100 if (valid_signal(sig) && sig)
2101 __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2102 __wake_up_parent(tsk, tsk->parent);
2103 spin_unlock_irqrestore(&psig->siglock, flags);
2104
2105 return autoreap;
2106}
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121static void do_notify_parent_cldstop(struct task_struct *tsk,
2122 bool for_ptracer, int why)
2123{
2124 struct kernel_siginfo info;
2125 unsigned long flags;
2126 struct task_struct *parent;
2127 struct sighand_struct *sighand;
2128 u64 utime, stime;
2129
2130 if (for_ptracer) {
2131 parent = tsk->parent;
2132 } else {
2133 tsk = tsk->group_leader;
2134 parent = tsk->real_parent;
2135 }
2136
2137 clear_siginfo(&info);
2138 info.si_signo = SIGCHLD;
2139 info.si_errno = 0;
2140
2141
2142
2143 rcu_read_lock();
2144 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2145 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2146 rcu_read_unlock();
2147
2148 task_cputime(tsk, &utime, &stime);
2149 info.si_utime = nsec_to_clock_t(utime);
2150 info.si_stime = nsec_to_clock_t(stime);
2151
2152 info.si_code = why;
2153 switch (why) {
2154 case CLD_CONTINUED:
2155 info.si_status = SIGCONT;
2156 break;
2157 case CLD_STOPPED:
2158 info.si_status = tsk->signal->group_exit_code & 0x7f;
2159 break;
2160 case CLD_TRAPPED:
2161 info.si_status = tsk->exit_code & 0x7f;
2162 break;
2163 default:
2164 BUG();
2165 }
2166
2167 sighand = parent->sighand;
2168 spin_lock_irqsave(&sighand->siglock, flags);
2169 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2170 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2171 __group_send_sig_info(SIGCHLD, &info, parent);
2172
2173
2174
2175 __wake_up_parent(tsk, parent);
2176 spin_unlock_irqrestore(&sighand->siglock, flags);
2177}
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
2193{
2194 bool gstop_done = false;
2195
2196 if (arch_ptrace_stop_needed()) {
2197
2198
2199
2200
2201
2202
2203
2204
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop();
		spin_lock_irq(&current->sighand->siglock);
2208 }
2209
2210
2211
2212
2213
2214 set_special_state(TASK_TRACED);
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234 smp_wmb();
2235
2236 current->last_siginfo = info;
2237 current->exit_code = exit_code;
2238
2239
2240
2241
2242
2243
2244
2245
2246 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2247 gstop_done = task_participate_group_stop(current);
2248
2249
2250 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2251 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2252 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2253
2254
2255 task_clear_jobctl_trapping(current);
2256
2257 spin_unlock_irq(¤t->sighand->siglock);
2258 read_lock(&tasklist_lock);
2259 if (likely(current->ptrace)) {
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270 do_notify_parent_cldstop(current, true, why);
2271 if (gstop_done && ptrace_reparented(current))
2272 do_notify_parent_cldstop(current, false, why);
2273
2274
2275
2276
2277
2278
2279
2280 preempt_disable();
2281 read_unlock(&tasklist_lock);
2282 cgroup_enter_frozen();
2283 preempt_enable_no_resched();
2284 freezable_schedule();
2285 cgroup_leave_frozen(true);
2286 } else {
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297 if (gstop_done)
2298 do_notify_parent_cldstop(current, false, why);
2299
2300
2301 __set_current_state(TASK_RUNNING);
2302 if (clear_code)
2303 current->exit_code = 0;
2304 read_unlock(&tasklist_lock);
2305 }
2306
2307
2308
2309
2310
2311
	spin_lock_irq(&current->sighand->siglock);
2313 current->last_siginfo = NULL;
2314
2315
2316 current->jobctl &= ~JOBCTL_LISTENING;
2317
2318
2319
2320
2321
2322
2323 recalc_sigpending_tsk(current);
2324}
2325
2326static void ptrace_do_notify(int signr, int exit_code, int why)
2327{
2328 kernel_siginfo_t info;
2329
2330 clear_siginfo(&info);
2331 info.si_signo = signr;
2332 info.si_code = exit_code;
2333 info.si_pid = task_pid_vnr(current);
2334 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2335
2336
2337 ptrace_stop(exit_code, why, 1, &info);
2338}
2339
2340void ptrace_notify(int exit_code)
2341{
2342 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2343 if (unlikely(current->task_works))
2344 task_work_run();
2345
	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
2349}
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
2375{
2376 struct signal_struct *sig = current->signal;
2377
2378 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2379 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2380 struct task_struct *t;
2381
2382
2383 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2384
2385 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2386 unlikely(signal_group_exit(sig)))
2387 return false;
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2408 sig->group_exit_code = signr;
2409
2410 sig->group_stop_count = 0;
2411
2412 if (task_set_jobctl_pending(current, signr | gstop))
2413 sig->group_stop_count++;
2414
2415 t = current;
2416 while_each_thread(current, t) {
2417
2418
2419
2420
2421
2422 if (!task_is_stopped(t) &&
2423 task_set_jobctl_pending(t, signr | gstop)) {
2424 sig->group_stop_count++;
2425 if (likely(!(t->ptrace & PT_SEIZED)))
2426 signal_wake_up(t, 0);
2427 else
2428 ptrace_trap_notify(t);
2429 }
2430 }
2431 }
2432
2433 if (likely(!current->ptrace)) {
2434 int notify = 0;
2435
2436
2437
2438
2439
2440
2441 if (task_participate_group_stop(current))
2442 notify = CLD_STOPPED;
2443
2444 set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456 if (notify) {
2457 read_lock(&tasklist_lock);
2458 do_notify_parent_cldstop(current, false, notify);
2459 read_unlock(&tasklist_lock);
2460 }
2461
2462
2463 cgroup_enter_frozen();
2464 freezable_schedule();
2465 return true;
2466 } else {
2467
2468
2469
2470
2471 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2472 return false;
2473 }
2474}
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491static void do_jobctl_trap(void)
2492{
2493 struct signal_struct *signal = current->signal;
2494 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2495
2496 if (current->ptrace & PT_SEIZED) {
2497 if (!signal->group_stop_count &&
2498 !(signal->flags & SIGNAL_STOP_STOPPED))
2499 signr = SIGTRAP;
2500 WARN_ON_ONCE(!signr);
2501 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2502 CLD_STOPPED);
2503 } else {
2504 WARN_ON_ONCE(!signr);
2505 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2506 current->exit_code = 0;
2507 }
2508}
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520static void do_freezer_trap(void)
	__releases(&current->sighand->siglock)
2522{
2523
2524
2525
2526
2527
2528 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2529 JOBCTL_TRAP_FREEZE) {
		spin_unlock_irq(&current->sighand->siglock);
2531 return;
2532 }
2533
2534
2535
2536
2537
2538
2539
2540 __set_current_state(TASK_INTERRUPTIBLE);
2541 clear_thread_flag(TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
2543 cgroup_enter_frozen();
2544 freezable_schedule();
2545}
2546
2547static int ptrace_signal(int signr, kernel_siginfo_t *info)
2548{
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2559 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2560
2561
2562 signr = current->exit_code;
2563 if (signr == 0)
2564 return signr;
2565
2566 current->exit_code = 0;
2567
2568
2569
2570
2571
2572
2573
2574 if (signr != info->si_signo) {
2575 clear_siginfo(info);
2576 info->si_signo = signr;
2577 info->si_errno = 0;
2578 info->si_code = SI_USER;
2579 rcu_read_lock();
2580 info->si_pid = task_pid_vnr(current->parent);
2581 info->si_uid = from_kuid_munged(current_user_ns(),
2582 task_uid(current->parent));
2583 rcu_read_unlock();
2584 }
2585
	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
2588 send_signal(signr, info, current, PIDTYPE_PID);
2589 signr = 0;
2590 }
2591
2592 return signr;
2593}
2594
2595static void hide_si_addr_tag_bits(struct ksignal *ksig)
2596{
2597 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2598 case SIL_FAULT:
2599 case SIL_FAULT_TRAPNO:
2600 case SIL_FAULT_MCEERR:
2601 case SIL_FAULT_BNDERR:
2602 case SIL_FAULT_PKUERR:
2603 case SIL_FAULT_PERF_EVENT:
2604 ksig->info.si_addr = arch_untagged_si_addr(
2605 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2606 break;
2607 case SIL_KILL:
2608 case SIL_TIMER:
2609 case SIL_POLL:
2610 case SIL_CHLD:
2611 case SIL_RT:
2612 case SIL_SYS:
2613 break;
2614 }
2615}
2616
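/*
 * Main signal-delivery entry point, called on return to user mode.
 * Picks the next signal to deliver into *ksig and returns true if a
 * user handler must be invoked; job-control stops, ptrace traps and
 * fatal signals are handled here directly.
 */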
2617bool get_signal(struct ksignal *ksig)
2618{
2619 struct sighand_struct *sighand = current->sighand;
2620 struct signal_struct *signal = current->signal;
2621 int signr;
2622
2623 if (unlikely(current->task_works))
2624 task_work_run();
2625
2626
2627
2628
2629
2630
2631 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2632 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2633 tracehook_notify_signal();
2634 if (!task_sigpending(current))
2635 return false;
2636 }
2637
2638 if (unlikely(uprobe_deny_signal()))
2639 return false;
2640
2641
2642
2643
2644
2645
2646 try_to_freeze();
2647
2648relock:
2649 spin_lock_irq(&sighand->siglock);
2650
2651
2652
2653
2654
2655
2656 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2657 int why;
2658
2659 if (signal->flags & SIGNAL_CLD_CONTINUED)
2660 why = CLD_CONTINUED;
2661 else
2662 why = CLD_STOPPED;
2663
2664 signal->flags &= ~SIGNAL_CLD_MASK;
2665
2666 spin_unlock_irq(&sighand->siglock);
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676 read_lock(&tasklist_lock);
2677 do_notify_parent_cldstop(current, false, why);
2678
2679 if (ptrace_reparented(current->group_leader))
2680 do_notify_parent_cldstop(current->group_leader,
2681 true, why);
2682 read_unlock(&tasklist_lock);
2683
2684 goto relock;
2685 }
2686
2687
2688 if (signal_group_exit(signal)) {
2689 ksig->info.si_signo = signr = SIGKILL;
		sigdelset(&current->pending.signal, SIGKILL);
2691 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2692 &sighand->action[SIGKILL - 1]);
2693 recalc_sigpending();
2694 goto fatal;
2695 }
2696
2697 for (;;) {
2698 struct k_sigaction *ka;
2699
2700 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2701 do_signal_stop(0))
2702 goto relock;
2703
2704 if (unlikely(current->jobctl &
2705 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2706 if (current->jobctl & JOBCTL_TRAP_MASK) {
2707 do_jobctl_trap();
2708 spin_unlock_irq(&sighand->siglock);
2709 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2710 do_freezer_trap();
2711
2712 goto relock;
2713 }
2714
2715
2716
2717
2718
2719 if (unlikely(cgroup_task_frozen(current))) {
2720 spin_unlock_irq(&sighand->siglock);
2721 cgroup_leave_frozen(false);
2722 goto relock;
2723 }
2724
2725
2726
2727
2728
2729
2730
2731 signr = dequeue_synchronous_signal(&ksig->info);
2732 if (!signr)
			signr = dequeue_signal(current, &current->blocked, &ksig->info);
2734
2735 if (!signr)
2736 break;
2737
2738 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2739 !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
2740 signr = ptrace_signal(signr, &ksig->info);
2741 if (!signr)
2742 continue;
2743 }
2744
2745 ka = &sighand->action[signr-1];
2746
2747
2748 trace_signal_deliver(signr, &ksig->info, ka);
2749
2750 if (ka->sa.sa_handler == SIG_IGN)
2751 continue;
2752 if (ka->sa.sa_handler != SIG_DFL) {
2753
2754 ksig->ka = *ka;
2755
2756 if (ka->sa.sa_flags & SA_ONESHOT)
2757 ka->sa.sa_handler = SIG_DFL;
2758
2759 break;
2760 }
2761
2762
2763
2764
2765 if (sig_kernel_ignore(signr))
2766 continue;
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2779 !sig_kernel_only(signr))
2780 continue;
2781
2782 if (sig_kernel_stop(signr)) {
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793 if (signr != SIGSTOP) {
2794 spin_unlock_irq(&sighand->siglock);
2795
2796
2797
2798 if (is_current_pgrp_orphaned())
2799 goto relock;
2800
2801 spin_lock_irq(&sighand->siglock);
2802 }
2803
2804 if (likely(do_signal_stop(ksig->info.si_signo))) {
2805
2806 goto relock;
2807 }
2808
2809
2810
2811
2812
2813 continue;
2814 }
2815
2816 fatal:
2817 spin_unlock_irq(&sighand->siglock);
2818 if (unlikely(cgroup_task_frozen(current)))
2819 cgroup_leave_frozen(true);
2820
2821
2822
2823
2824 current->flags |= PF_SIGNALED;
2825
2826 if (sig_kernel_coredump(signr)) {
2827 if (print_fatal_signals)
2828 print_fatal_signal(ksig->info.si_signo);
2829 proc_coredump_connector(current);
2830
2831
2832
2833
2834
2835
2836
2837
2838 do_coredump(&ksig->info);
2839 }
2840
2841
2842
2843
2844
2845
2846 if (current->flags & PF_IO_WORKER)
2847 goto out;
2848
2849
2850
2851
2852 do_group_exit(ksig->info.si_signo);
2853
2854 }
2855 spin_unlock_irq(&sighand->siglock);
2856out:
2857 ksig->sig = signr;
2858
2859 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2860 hide_si_addr_tag_bits(ksig);
2861
2862 return ksig->sig > 0;
2863}
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875static void signal_delivered(struct ksignal *ksig, int stepping)
2876{
2877 sigset_t blocked;
2878
	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn.  So we can
	   simply clear the restore sigmask flag.  */
2883 clear_restore_sigmask();
2884
	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2886 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2887 sigaddset(&blocked, ksig->sig);
2888 set_current_blocked(&blocked);
2889 if (current->sas_ss_flags & SS_AUTODISARM)
2890 sas_ss_reset(current);
2891 tracehook_signal_handler(stepping);
2892}
2893
2894void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2895{
2896 if (failed)
2897 force_sigsegv(ksig->sig);
2898 else
2899 signal_delivered(ksig, stepping);
2900}
2901
2902
2903
2904
2905
2906
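/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */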
2907static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2908{
2909 sigset_t retarget;
2910 struct task_struct *t;
2911
2912 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2913 if (sigisemptyset(&retarget))
2914 return;
2915
2916 t = tsk;
2917 while_each_thread(tsk, t) {
2918 if (t->flags & PF_EXITING)
2919 continue;
2920
2921 if (!has_pending_signals(&retarget, &t->blocked))
2922 continue;
2923
2924 sigandsets(&retarget, &retarget, &t->blocked);
2925
2926 if (!task_sigpending(t))
2927 signal_wake_up(t, 0);
2928
2929 if (sigisemptyset(&retarget))
2930 break;
2931 }
2932}
2933
2934void exit_signals(struct task_struct *tsk)
2935{
2936 int group_stop = 0;
2937 sigset_t unblocked;
2938
2939
2940
2941
2942
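/*
 * @tsk is about to have PF_EXITING set - lock out users which
 * expect stable threadgroup.
 */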
2943 cgroup_threadgroup_change_begin(tsk);
2944
2945 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2946 tsk->flags |= PF_EXITING;
2947 cgroup_threadgroup_change_end(tsk);
2948 return;
2949 }
2950
2951 spin_lock_irq(&tsk->sighand->siglock);
2952
2953
2954
2955
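/*
 * From now this task is not visible for group-wide signals,
 * see wants_signal(), do_signal_stop().
 */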
2956 tsk->flags |= PF_EXITING;
2957
2958 cgroup_threadgroup_change_end(tsk);
2959
2960 if (!task_sigpending(tsk))
2961 goto out;
2962
2963 unblocked = tsk->blocked;
2964 signotset(&unblocked);
2965 retarget_shared_pending(tsk, &unblocked);
2966
2967 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2968 task_participate_group_stop(tsk))
2969 group_stop = CLD_STOPPED;
2970out:
2971 spin_unlock_irq(&tsk->sighand->siglock);
2972
2973
2974
2975
2976
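/*
 * If group stop has completed, deliver the notification. This
 * should always go to the real parent of the group leader.
 */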
2977 if (unlikely(group_stop)) {
2978 read_lock(&tasklist_lock);
2979 do_notify_parent_cldstop(tsk, false, group_stop);
2980 read_unlock(&tasklist_lock);
2981 }
2982}
2983
2984
2985
2986
2987
2988
2989
2990
2991SYSCALL_DEFINE0(restart_syscall)
2992{
2993 struct restart_block *restart = &current->restart_block;
2994 return restart->fn(restart);
2995}
2996
2997long do_no_restart_syscall(struct restart_block *param)
2998{
2999 return -EINTR;
3000}
3001
3002static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3003{
3004 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3005 sigset_t newblocked;
3006
3007 sigandnsets(&newblocked, newset, &current->blocked);
3008 retarget_shared_pending(tsk, &newblocked);
3009 }
3010 tsk->blocked = *newset;
3011 recalc_sigpending();
3012}
3013
3014
3015
3016
3017
3018
3019
3020
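/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */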
3021void set_current_blocked(sigset_t *newset)
3022{
3023 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3024 __set_current_blocked(newset);
3025}
3026
3027void __set_current_blocked(const sigset_t *newset)
3028{
3029 struct task_struct *tsk = current;
3030
3031
3032
3033
3034
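/*
 * In case the signal mask hasn't changed, there is nothing we need
 * to do. The current->blocked shouldn't be modified by other task.
 */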
3035 if (sigequalsets(&tsk->blocked, newset))
3036 return;
3037
3038 spin_lock_irq(&tsk->sighand->siglock);
3039 __set_task_blocked(tsk, newset);
3040 spin_unlock_irq(&tsk->sighand->siglock);
3041}
3042
3043
3044
3045
3046
3047
3048
3049
3050
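/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */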
3051int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3052{
3053 struct task_struct *tsk = current;
3054 sigset_t newset;
3055
3056
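/* Lockless, only current can change ->blocked, never from irq */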
3057 if (oldset)
3058 *oldset = tsk->blocked;
3059
3060 switch (how) {
3061 case SIG_BLOCK:
3062 sigorsets(&newset, &tsk->blocked, set);
3063 break;
3064 case SIG_UNBLOCK:
3065 sigandnsets(&newset, &tsk->blocked, set);
3066 break;
3067 case SIG_SETMASK:
3068 newset = *set;
3069 break;
3070 default:
3071 return -EINVAL;
3072 }
3073
3074 __set_current_blocked(&newset);
3075 return 0;
3076}
3077EXPORT_SYMBOL(sigprocmask);
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
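/*
 * The api helps set app-provided sigmasks.
 *
 * This is useful for syscalls such as ppoll, pselect, io_pgetevents
 * and epoll_pwait where a new sigmask is passed from userland for the
 * syscalls.
 *
 * Note that it does set_restore_sigmask() in advance, so it must be always
 * paired with restore_saved_sigmask_unless() before return from syscall.
 */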
3088int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3089{
3090 sigset_t kmask;
3091
3092 if (!umask)
3093 return 0;
3094 if (sigsetsize != sizeof(sigset_t))
3095 return -EINVAL;
3096 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3097 return -EFAULT;
3098
3099 set_restore_sigmask();
3100 current->saved_sigmask = current->blocked;
3101 set_current_blocked(&kmask);
3102
3103 return 0;
3104}
3105
3106#ifdef CONFIG_COMPAT
3107int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3108 size_t sigsetsize)
3109{
3110 sigset_t kmask;
3111
3112 if (!umask)
3113 return 0;
3114 if (sigsetsize != sizeof(compat_sigset_t))
3115 return -EINVAL;
3116 if (get_compat_sigset(&kmask, umask))
3117 return -EFAULT;
3118
3119 set_restore_sigmask();
3120 current->saved_sigmask = current->blocked;
3121 set_current_blocked(&kmask);
3122
3123 return 0;
3124}
3125#endif
3126
3127
3128
3129
3130
3131
3132
3133
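/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: stores pending signals
 *  @oset: previous value of signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */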
3134SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3135 sigset_t __user *, oset, size_t, sigsetsize)
3136{
3137 sigset_t old_set, new_set;
3138 int error;
3139
3140
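/* XXX: Don't preclude handling different sized sigset_t's. */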
3141 if (sigsetsize != sizeof(sigset_t))
3142 return -EINVAL;
3143
3144 old_set = current->blocked;
3145
3146 if (nset) {
3147 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3148 return -EFAULT;
3149 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3150
3151 error = sigprocmask(how, &new_set, NULL);
3152 if (error)
3153 return error;
3154 }
3155
3156 if (oset) {
3157 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3158 return -EFAULT;
3159 }
3160
3161 return 0;
3162}
3163
3164#ifdef CONFIG_COMPAT
3165COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3166 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3167{
3168 sigset_t old_set = current->blocked;
3169
3170
3171 if (sigsetsize != sizeof(compat_sigset_t))
3172 return -EINVAL;
3173
3174 if (nset) {
3175 sigset_t new_set;
3176 int error;
3177 if (get_compat_sigset(&new_set, nset))
3178 return -EFAULT;
3179 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3180
3181 error = sigprocmask(how, &new_set, NULL);
3182 if (error)
3183 return error;
3184 }
3185 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3186}
3187#endif
3188
3189static void do_sigpending(sigset_t *set)
3190{
3191 spin_lock_irq(&current->sighand->siglock);
3192 sigorsets(set, &current->pending.signal,
3193 &current->signal->shared_pending.signal);
3194 spin_unlock_irq(&current->sighand->siglock);
3195
3196
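/* Outside the lock because only this thread touches it. */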
3197 sigandsets(set, &current->blocked, set);
3198}
3199
3200
3201
3202
3203
3204
3205
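/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @uset: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */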
3206SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3207{
3208 sigset_t set;
3209
3210 if (sigsetsize > sizeof(*uset))
3211 return -EINVAL;
3212
3213 do_sigpending(&set);
3214
3215 if (copy_to_user(uset, &set, sigsetsize))
3216 return -EFAULT;
3217
3218 return 0;
3219}
3220
3221#ifdef CONFIG_COMPAT
3222COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3223 compat_size_t, sigsetsize)
3224{
3225 sigset_t set;
3226
3227 if (sigsetsize > sizeof(*uset))
3228 return -EINVAL;
3229
3230 do_sigpending(&set);
3231
3232 return put_compat_sigset(uset, &set, sigsetsize);
3233}
3234#endif
3235
3236static const struct {
3237 unsigned char limit, layout;
3238} sig_sicodes[] = {
3239 [SIGILL] = { NSIGILL, SIL_FAULT },
3240 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3241 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3242 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3243 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3244#if defined(SIGEMT)
3245 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3246#endif
3247 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3248 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3249 [SIGSYS] = { NSIGSYS, SIL_SYS },
3250};
3251
3252static bool known_siginfo_layout(unsigned sig, int si_code)
3253{
3254 if (si_code == SI_KERNEL)
3255 return true;
3256 else if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3257 /* Handle the exceptions */
3258 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3259 (si_code <= sig_sicodes[sig].limit))
3260 return true;
3261 else if (si_code <= NSIGPOLL)
3262 return true;
3263 }
3264 else if (si_code >= SI_DETHREAD)
3265 return true;
3266 else if (si_code == SI_ASYNCNL)
3267 return true;
3268 return false;
3269}
3270
3271enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3272{
3273 enum siginfo_layout layout = SIL_KILL;
3274 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3275 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3276 (si_code <= sig_sicodes[sig].limit)) {
3277 layout = sig_sicodes[sig].layout;
3278
3279 if ((sig == SIGBUS) &&
3280 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3281 layout = SIL_FAULT_MCEERR;
3282 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3283 layout = SIL_FAULT_BNDERR;
3284#ifdef SEGV_PKUERR
3285 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3286 layout = SIL_FAULT_PKUERR;
3287#endif
3288 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3289 layout = SIL_FAULT_PERF_EVENT;
3290 else if (IS_ENABLED(CONFIG_SPARC) &&
3291 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3292 layout = SIL_FAULT_TRAPNO;
3293 else if (IS_ENABLED(CONFIG_ALPHA) &&
3294 ((sig == SIGFPE) ||
3295 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3296 layout = SIL_FAULT_TRAPNO;
3297 }
3298 else if (si_code <= NSIGPOLL)
3299 layout = SIL_POLL;
3300 } else {
3301 if (si_code == SI_TIMER)
3302 layout = SIL_TIMER;
3303 else if (si_code == SI_SIGIO)
3304 layout = SIL_POLL;
3305 else if (si_code < 0)
3306 layout = SIL_RT;
3307 }
3308 return layout;
3309}
3310
3311static inline char __user *si_expansion(const siginfo_t __user *info)
3312{
3313 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3314}
3315
3316int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3317{
3318 char __user *expansion = si_expansion(to);
3319 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3320 return -EFAULT;
3321 if (clear_user(expansion, SI_EXPANSION_SIZE))
3322 return -EFAULT;
3323 return 0;
3324}
3325
3326static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3327 const siginfo_t __user *from)
3328{
3329 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3330 char __user *expansion = si_expansion(from);
3331 char buf[SI_EXPANSION_SIZE];
3332 int i;
3333
3334
3335
3336
3337
3338
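/*
 * An unknown si_code might need more than
 * sizeof(struct kernel_siginfo) bytes. Verify all of the
 * extra bytes are 0. This guarantees copy_siginfo_to_user
 * will return this data to userspace exactly.
 */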
3339 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3340 return -EFAULT;
3341 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3342 if (buf[i] != 0)
3343 return -E2BIG;
3344 }
3345 }
3346 return 0;
3347}
3348
3349static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3350 const siginfo_t __user *from)
3351{
3352 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3353 return -EFAULT;
3354 to->si_signo = signo;
3355 return post_copy_siginfo_from_user(to, from);
3356}
3357
3358int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3359{
3360 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3361 return -EFAULT;
3362 return post_copy_siginfo_from_user(to, from);
3363}
3364
3365#ifdef CONFIG_COMPAT
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
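/**
 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
 * @to: compat siginfo destination
 * @from: kernel siginfo source
 *
 * Note: This function does not work properly for the SIGCHLD on x32, but
 * fortunately it doesn't have to. The only valid callers for this function
 * are copy_siginfo_to_user32, which is overridden for x32 and the coredump
 * code, and there the function x32_copy_siginfo_to_user32 is used.
 */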
3376void copy_siginfo_to_external32(struct compat_siginfo *to,
3377 const struct kernel_siginfo *from)
3378{
3379 memset(to, 0, sizeof(*to));
3380
3381 to->si_signo = from->si_signo;
3382 to->si_errno = from->si_errno;
3383 to->si_code = from->si_code;
3384 switch (siginfo_layout(from->si_signo, from->si_code)) {
3385 case SIL_KILL:
3386 to->si_pid = from->si_pid;
3387 to->si_uid = from->si_uid;
3388 break;
3389 case SIL_TIMER:
3390 to->si_tid = from->si_tid;
3391 to->si_overrun = from->si_overrun;
3392 to->si_int = from->si_int;
3393 break;
3394 case SIL_POLL:
3395 to->si_band = from->si_band;
3396 to->si_fd = from->si_fd;
3397 break;
3398 case SIL_FAULT:
3399 to->si_addr = ptr_to_compat(from->si_addr);
3400 break;
3401 case SIL_FAULT_TRAPNO:
3402 to->si_addr = ptr_to_compat(from->si_addr);
3403 to->si_trapno = from->si_trapno;
3404 break;
3405 case SIL_FAULT_MCEERR:
3406 to->si_addr = ptr_to_compat(from->si_addr);
3407 to->si_addr_lsb = from->si_addr_lsb;
3408 break;
3409 case SIL_FAULT_BNDERR:
3410 to->si_addr = ptr_to_compat(from->si_addr);
3411 to->si_lower = ptr_to_compat(from->si_lower);
3412 to->si_upper = ptr_to_compat(from->si_upper);
3413 break;
3414 case SIL_FAULT_PKUERR:
3415 to->si_addr = ptr_to_compat(from->si_addr);
3416 to->si_pkey = from->si_pkey;
3417 break;
3418 case SIL_FAULT_PERF_EVENT:
3419 to->si_addr = ptr_to_compat(from->si_addr);
3420 to->si_perf_data = from->si_perf_data;
3421 to->si_perf_type = from->si_perf_type;
3422 break;
3423 case SIL_CHLD:
3424 to->si_pid = from->si_pid;
3425 to->si_uid = from->si_uid;
3426 to->si_status = from->si_status;
3427 to->si_utime = from->si_utime;
3428 to->si_stime = from->si_stime;
3429 break;
3430 case SIL_RT:
3431 to->si_pid = from->si_pid;
3432 to->si_uid = from->si_uid;
3433 to->si_int = from->si_int;
3434 break;
3435 case SIL_SYS:
3436 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3437 to->si_syscall = from->si_syscall;
3438 to->si_arch = from->si_arch;
3439 break;
3440 }
3441}
3442
3443int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3444 const struct kernel_siginfo *from)
3445{
3446 struct compat_siginfo new;
3447
3448 copy_siginfo_to_external32(&new, from);
3449 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3450 return -EFAULT;
3451 return 0;
3452}
3453
3454static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3455 const struct compat_siginfo *from)
3456{
3457 clear_siginfo(to);
3458 to->si_signo = from->si_signo;
3459 to->si_errno = from->si_errno;
3460 to->si_code = from->si_code;
3461 switch (siginfo_layout(from->si_signo, from->si_code)) {
3462 case SIL_KILL:
3463 to->si_pid = from->si_pid;
3464 to->si_uid = from->si_uid;
3465 break;
3466 case SIL_TIMER:
3467 to->si_tid = from->si_tid;
3468 to->si_overrun = from->si_overrun;
3469 to->si_int = from->si_int;
3470 break;
3471 case SIL_POLL:
3472 to->si_band = from->si_band;
3473 to->si_fd = from->si_fd;
3474 break;
3475 case SIL_FAULT:
3476 to->si_addr = compat_ptr(from->si_addr);
3477 break;
3478 case SIL_FAULT_TRAPNO:
3479 to->si_addr = compat_ptr(from->si_addr);
3480 to->si_trapno = from->si_trapno;
3481 break;
3482 case SIL_FAULT_MCEERR:
3483 to->si_addr = compat_ptr(from->si_addr);
3484 to->si_addr_lsb = from->si_addr_lsb;
3485 break;
3486 case SIL_FAULT_BNDERR:
3487 to->si_addr = compat_ptr(from->si_addr);
3488 to->si_lower = compat_ptr(from->si_lower);
3489 to->si_upper = compat_ptr(from->si_upper);
3490 break;
3491 case SIL_FAULT_PKUERR:
3492 to->si_addr = compat_ptr(from->si_addr);
3493 to->si_pkey = from->si_pkey;
3494 break;
3495 case SIL_FAULT_PERF_EVENT:
3496 to->si_addr = compat_ptr(from->si_addr);
3497 to->si_perf_data = from->si_perf_data;
3498 to->si_perf_type = from->si_perf_type;
3499 break;
3500 case SIL_CHLD:
3501 to->si_pid = from->si_pid;
3502 to->si_uid = from->si_uid;
3503 to->si_status = from->si_status;
3504#ifdef CONFIG_X86_X32_ABI
3505 if (in_x32_syscall()) {
3506 to->si_utime = from->_sifields._sigchld_x32._utime;
3507 to->si_stime = from->_sifields._sigchld_x32._stime;
3508 } else
3509#endif
3510 {
3511 to->si_utime = from->si_utime;
3512 to->si_stime = from->si_stime;
3513 }
3514 break;
3515 case SIL_RT:
3516 to->si_pid = from->si_pid;
3517 to->si_uid = from->si_uid;
3518 to->si_int = from->si_int;
3519 break;
3520 case SIL_SYS:
3521 to->si_call_addr = compat_ptr(from->si_call_addr);
3522 to->si_syscall = from->si_syscall;
3523 to->si_arch = from->si_arch;
3524 break;
3525 }
3526 return 0;
3527}
3528
3529static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3530 const struct compat_siginfo __user *ufrom)
3531{
3532 struct compat_siginfo from;
3533
3534 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3535 return -EFAULT;
3536
3537 from.si_signo = signo;
3538 return post_copy_siginfo_from_user32(to, &from);
3539}
3540
3541int copy_siginfo_from_user32(struct kernel_siginfo *to,
3542 const struct compat_siginfo __user *ufrom)
3543{
3544 struct compat_siginfo from;
3545
3546 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3547 return -EFAULT;
3548
3549 return post_copy_siginfo_from_user32(to, &from);
3550}
3551#endif
3552
3553
3554
3555
3556
3557
3558
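/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 */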
3559static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3560 const struct timespec64 *ts)
3561{
3562 ktime_t *to = NULL, timeout = KTIME_MAX;
3563 struct task_struct *tsk = current;
3564 sigset_t mask = *which;
3565 int sig, ret = 0;
3566
3567 if (ts) {
3568 if (!timespec64_valid(ts))
3569 return -EINVAL;
3570 timeout = timespec64_to_ktime(*ts);
3571 to = &timeout;
3572 }
3573
3574
3575
3576
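/*
 * Invert the set of allowed signals to get those we want to block.
 */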
3577 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3578 signotset(&mask);
3579
3580 spin_lock_irq(&tsk->sighand->siglock);
3581 sig = dequeue_signal(tsk, &mask, info);
3582 if (!sig && timeout) {
3583
3584
3585
3586
3587
3588
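/*
 * None ready, temporarily unblock those we're interested
 * while we are sleeping in so that we'll be awakened when
 * they arrive. Unblocking is always fine, we can avoid
 * set_current_blocked().
 */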
3589 tsk->real_blocked = tsk->blocked;
3590 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3591 recalc_sigpending();
3592 spin_unlock_irq(&tsk->sighand->siglock);
3593
3594 __set_current_state(TASK_INTERRUPTIBLE);
3595 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3596 HRTIMER_MODE_REL);
3597 spin_lock_irq(&tsk->sighand->siglock);
3598 __set_task_blocked(tsk, &tsk->real_blocked);
3599 sigemptyset(&tsk->real_blocked);
3600 sig = dequeue_signal(tsk, &mask, info);
3601 }
3602 spin_unlock_irq(&tsk->sighand->siglock);
3603
3604 if (sig)
3605 return sig;
3606 return ret ? -EINTR : -EAGAIN;
3607}
3608
3609
3610
3611
3612
3613
3614
3615
3616
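/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */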
3617SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3618 siginfo_t __user *, uinfo,
3619 const struct __kernel_timespec __user *, uts,
3620 size_t, sigsetsize)
3621{
3622 sigset_t these;
3623 struct timespec64 ts;
3624 kernel_siginfo_t info;
3625 int ret;
3626
3627
3628 if (sigsetsize != sizeof(sigset_t))
3629 return -EINVAL;
3630
3631 if (copy_from_user(&these, uthese, sizeof(these)))
3632 return -EFAULT;
3633
3634 if (uts) {
3635 if (get_timespec64(&ts, uts))
3636 return -EFAULT;
3637 }
3638
3639 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3640
3641 if (ret > 0 && uinfo) {
3642 if (copy_siginfo_to_user(uinfo, &info))
3643 ret = -EFAULT;
3644 }
3645
3646 return ret;
3647}
3648
3649#ifdef CONFIG_COMPAT_32BIT_TIME
3650SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3651 siginfo_t __user *, uinfo,
3652 const struct old_timespec32 __user *, uts,
3653 size_t, sigsetsize)
3654{
3655 sigset_t these;
3656 struct timespec64 ts;
3657 kernel_siginfo_t info;
3658 int ret;
3659
3660 if (sigsetsize != sizeof(sigset_t))
3661 return -EINVAL;
3662
3663 if (copy_from_user(&these, uthese, sizeof(these)))
3664 return -EFAULT;
3665
3666 if (uts) {
3667 if (get_old_timespec32(&ts, uts))
3668 return -EFAULT;
3669 }
3670
3671 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3672
3673 if (ret > 0 && uinfo) {
3674 if (copy_siginfo_to_user(uinfo, &info))
3675 ret = -EFAULT;
3676 }
3677
3678 return ret;
3679}
3680#endif
3681
3682#ifdef CONFIG_COMPAT
3683COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3684 struct compat_siginfo __user *, uinfo,
3685 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3686{
3687 sigset_t s;
3688 struct timespec64 t;
3689 kernel_siginfo_t info;
3690 long ret;
3691
3692 if (sigsetsize != sizeof(sigset_t))
3693 return -EINVAL;
3694
3695 if (get_compat_sigset(&s, uthese))
3696 return -EFAULT;
3697
3698 if (uts) {
3699 if (get_timespec64(&t, uts))
3700 return -EFAULT;
3701 }
3702
3703 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3704
3705 if (ret > 0 && uinfo) {
3706 if (copy_siginfo_to_user32(uinfo, &info))
3707 ret = -EFAULT;
3708 }
3709
3710 return ret;
3711}
3712
3713#ifdef CONFIG_COMPAT_32BIT_TIME
3714COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3715 struct compat_siginfo __user *, uinfo,
3716 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3717{
3718 sigset_t s;
3719 struct timespec64 t;
3720 kernel_siginfo_t info;
3721 long ret;
3722
3723 if (sigsetsize != sizeof(sigset_t))
3724 return -EINVAL;
3725
3726 if (get_compat_sigset(&s, uthese))
3727 return -EFAULT;
3728
3729 if (uts) {
3730 if (get_old_timespec32(&t, uts))
3731 return -EFAULT;
3732 }
3733
3734 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3735
3736 if (ret > 0 && uinfo) {
3737 if (copy_siginfo_to_user32(uinfo, &info))
3738 ret = -EFAULT;
3739 }
3740
3741 return ret;
3742}
3743#endif
3744#endif
3745
3746static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3747{
3748 clear_siginfo(info);
3749 info->si_signo = sig;
3750 info->si_errno = 0;
3751 info->si_code = SI_USER;
3752 info->si_pid = task_tgid_vnr(current);
3753 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3754}
3755
3756
3757
3758
3759
3760
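/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */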
3761SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3762{
3763 struct kernel_siginfo info;
3764
3765 prepare_kill_siginfo(sig, &info);
3766
3767 return kill_something_info(sig, &info, pid);
3768}
3769
3770
3771
3772
3773
3774
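/*
 * Verify that the signaler and signalee either are in the same pid namespace
 * or that the signaler's pid namespace is an ancestor of the signalee's pid
 * namespace.
 */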
3775static bool access_pidfd_pidns(struct pid *pid)
3776{
3777 struct pid_namespace *active = task_active_pid_ns(current);
3778 struct pid_namespace *p = ns_of_pid(pid);
3779
3780 for (;;) {
3781 if (!p)
3782 return false;
3783 if (p == active)
3784 break;
3785 p = p->parent;
3786 }
3787
3788 return true;
3789}
3790
3791static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3792 siginfo_t __user *info)
3793{
3794#ifdef CONFIG_COMPAT
3795
3796
3797
3798
3799
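/*
 * Avoid hooking up compat syscalls and instead handle necessary
 * conversions here. Note, this is a stop-gap measure and should not be
 * considered a generic solution.
 */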
3800 if (in_compat_syscall())
3801 return copy_siginfo_from_user32(
3802 kinfo, (struct compat_siginfo __user *)info);
3803#endif
3804 return copy_siginfo_from_user(kinfo, info);
3805}
3806
3807static struct pid *pidfd_to_pid(const struct file *file)
3808{
3809 struct pid *pid;
3810
3811 pid = pidfd_pid(file);
3812 if (!IS_ERR(pid))
3813 return pid;
3814
3815 return tgid_pidfd_to_pid(file);
3816}
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
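/**
 * sys_pidfd_send_signal - Signal a process through a pidfd
 * @pidfd:  file descriptor of the process
 * @sig:    signal to send
 * @info:   signal info
 * @flags:  future flags
 *
 * The syscall currently only signals via PIDTYPE_PID which covers
 * kill(<positive-pid>, <signal>). It does not signal threads or process
 * groups.
 * In order to extend the syscall to threads and process groups the @flags
 * argument should be used. In essence, the @flags argument will determine
 * what is signaled and not the file descriptor itself. Put in other words,
 * grouping is a property of the flags argument not a property of the file
 * descriptor.
 *
 * Return: 0 on success, negative errno on failure
 */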
3836SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3837 siginfo_t __user *, info, unsigned int, flags)
3838{
3839 int ret;
3840 struct fd f;
3841 struct pid *pid;
3842 kernel_siginfo_t kinfo;
3843
3844
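/* Enforce flags be set to 0 until we add an extension. */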
3845 if (flags)
3846 return -EINVAL;
3847
3848 f = fdget(pidfd);
3849 if (!f.file)
3850 return -EBADF;
3851
3852
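/* Is this a pidfd? */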
3853 pid = pidfd_to_pid(f.file);
3854 if (IS_ERR(pid)) {
3855 ret = PTR_ERR(pid);
3856 goto err;
3857 }
3858
3859 ret = -EINVAL;
3860 if (!access_pidfd_pidns(pid))
3861 goto err;
3862
3863 if (info) {
3864 ret = copy_siginfo_from_user_any(&kinfo, info);
3865 if (unlikely(ret))
3866 goto err;
3867
3868 ret = -EINVAL;
3869 if (unlikely(sig != kinfo.si_signo))
3870 goto err;
3871
3872
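/* Only allow sending arbitrary signals to yourself. */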
3873 ret = -EPERM;
3874 if ((task_pid(current) != pid) &&
3875 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3876 goto err;
3877 } else {
3878 prepare_kill_siginfo(sig, &kinfo);
3879 }
3880
3881 ret = kill_pid_info(sig, &kinfo, pid);
3882
3883err:
3884 fdput(f);
3885 return ret;
3886}
3887
3888static int
3889do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3890{
3891 struct task_struct *p;
3892 int error = -ESRCH;
3893
3894 rcu_read_lock();
3895 p = find_task_by_vpid(pid);
3896 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3897 error = check_kill_permission(sig, info, p);
3898
3899
3900
3901
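/*
 * The null signal is a permissions and process
 * existence probe. No signal is actually delivered.
 */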
3902 if (!error && sig) {
3903 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3904
3905
3906
3907
3908
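/*
 * If lock_task_sighand() failed we pretend the task
 * dies after receiving the signal. The window is tiny,
 * and the signal is private anyway.
 */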
3909 if (unlikely(error == -ESRCH))
3910 error = 0;
3911 }
3912 }
3913 rcu_read_unlock();
3914
3915 return error;
3916}
3917
3918static int do_tkill(pid_t tgid, pid_t pid, int sig)
3919{
3920 struct kernel_siginfo info;
3921
3922 clear_siginfo(&info);
3923 info.si_signo = sig;
3924 info.si_errno = 0;
3925 info.si_code = SI_TKILL;
3926 info.si_pid = task_tgid_vnr(current);
3927 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3928
3929 return do_send_specific(tgid, pid, sig, &info);
3930}
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
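/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but it's not belonging to the target process anymore. This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */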
3942SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3943{
3944
3945 if (pid <= 0 || tgid <= 0)
3946 return -EINVAL;
3947
3948 return do_tkill(tgid, pid, sig);
3949}
3950
3951
3952
3953
3954
3955
3956
3957
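/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */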
3958SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3959{
3960
3961 if (pid <= 0)
3962 return -EINVAL;
3963
3964 return do_tkill(0, pid, sig);
3965}
3966
3967static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3968{
3969
3970
3971
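/*
 * Not even root can pretend to send signals from the kernel.
 * Nor can they impersonate a kill()/tgkill(), which adds source info.
 */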
3972 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3973 (task_pid_vnr(current) != pid))
3974 return -EPERM;
3975
3976
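/* POSIX.1b doesn't mention process groups. */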
3977 return kill_proc_info(sig, info, pid);
3978}
3979
3980
3981
3982
3983
3984
3985
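/**
 *  sys_rt_sigqueueinfo - send signal information to a signal
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */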
3986SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3987 siginfo_t __user *, uinfo)
3988{
3989 kernel_siginfo_t info;
3990 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3991 if (unlikely(ret))
3992 return ret;
3993 return do_rt_sigqueueinfo(pid, sig, &info);
3994}
3995
3996#ifdef CONFIG_COMPAT
3997COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3998 compat_pid_t, pid,
3999 int, sig,
4000 struct compat_siginfo __user *, uinfo)
4001{
4002 kernel_siginfo_t info;
4003 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4004 if (unlikely(ret))
4005 return ret;
4006 return do_rt_sigqueueinfo(pid, sig, &info);
4007}
4008#endif
4009
4010static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4011{
4012
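/* This is only valid for single tasks */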
4013 if (pid <= 0 || tgid <= 0)
4014 return -EINVAL;
4015
4016
4017
4018
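/*
 * Not even root can pretend to send signals from the kernel.
 * Nor can they impersonate a kill()/tgkill(), which adds source info.
 */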
4019 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4020 (task_pid_vnr(current) != pid))
4021 return -EPERM;
4022
4023 return do_send_specific(tgid, pid, sig, info);
4024}
4025
4026SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4027 siginfo_t __user *, uinfo)
4028{
4029 kernel_siginfo_t info;
4030 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4031 if (unlikely(ret))
4032 return ret;
4033 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4034}
4035
4036#ifdef CONFIG_COMPAT
4037COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4038 compat_pid_t, tgid,
4039 compat_pid_t, pid,
4040 int, sig,
4041 struct compat_siginfo __user *, uinfo)
4042{
4043 kernel_siginfo_t info;
4044 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4045 if (unlikely(ret))
4046 return ret;
4047 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4048}
4049#endif
4050
4051
4052
4053
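/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */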
4054void kernel_sigaction(int sig, __sighandler_t action)
4055{
4056 spin_lock_irq(&current->sighand->siglock);
4057 current->sighand->action[sig - 1].sa.sa_handler = action;
4058 if (action == SIG_IGN) {
4059 sigset_t mask;
4060
4061 sigemptyset(&mask);
4062 sigaddset(&mask, sig);
4063
4064 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4065 flush_sigqueue_mask(&mask, &current->pending);
4066 recalc_sigpending();
4067 }
4068 spin_unlock_irq(&current->sighand->siglock);
4069}
4070EXPORT_SYMBOL(kernel_sigaction);
4071
4072void __weak sigaction_compat_abi(struct k_sigaction *act,
4073 struct k_sigaction *oact)
4074{
4075}
4076
4077int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4078{
4079 struct task_struct *p = current, *t;
4080 struct k_sigaction *k;
4081 sigset_t mask;
4082
4083 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4084 return -EINVAL;
4085
4086 k = &p->sighand->action[sig-1];
4087
4088 spin_lock_irq(&p->sighand->siglock);
4089 if (k->sa.sa_flags & SA_IMMUTABLE) {
4090 spin_unlock_irq(&p->sighand->siglock);
4091 return -EINVAL;
4092 }
4093 if (oact)
4094 *oact = *k;
4095
4096
4097
4098
4099
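/*
 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
 * e.g. by having an unknown flag bit set in UAPI_SA_FLAGS.
 */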
4100 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4101
4102
4103
4104
4105
4106
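/*
 * Clear unknown flag bits in order to allow userspace to detect missing
 * support for flag bits and to allow the kernel to use non-uapi bits
 * internally.
 */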
4107 if (act)
4108 act->sa.sa_flags &= UAPI_SA_FLAGS;
4109 if (oact)
4110 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4111
4112 sigaction_compat_abi(act, oact);
4113
4114 if (act) {
4115 sigdelsetmask(&act->sa.sa_mask,
4116 sigmask(SIGKILL) | sigmask(SIGSTOP));
4117 *k = *act;
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
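/*
 * POSIX 3.3.1.3:
 *  "Setting a signal action to SIG_IGN for a signal that is
 *   pending shall cause the pending signal to be discarded,
 *   whether or not it is blocked."
 *
 *  "Setting a signal action to SIG_DFL for a signal that is
 *   pending and whose default action is to ignore the signal
 *   (for example, SIGCHLD), shall cause the pending signal to
 *   be discarded, whether or not it is blocked"
 */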
4129 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4130 sigemptyset(&mask);
4131 sigaddset(&mask, sig);
4132 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4133 for_each_thread(p, t)
4134 flush_sigqueue_mask(&mask, &t->pending);
4135 }
4136 }
4137
4138 spin_unlock_irq(&p->sighand->siglock);
4139 return 0;
4140}
4141
4142#ifdef CONFIG_DYNAMIC_SIGFRAME
4143static inline void sigaltstack_lock(void)
4144 __acquires(&current->sighand->siglock)
4145 {
4146 spin_lock_irq(&current->sighand->siglock);
4147 }
4148
4149 static inline void sigaltstack_unlock(void)
4150 __releases(&current->sighand->siglock)
4151 {
4152 spin_unlock_irq(&current->sighand->siglock);
4153}
4154#else
4155static inline void sigaltstack_lock(void) { }
4156static inline void sigaltstack_unlock(void) { }
4157#endif
4158
4159static int
4160 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4161 size_t min_ss_size)
4162{
4163 struct task_struct *t = current;
4164 int ret = 0;
4165
4166 if (oss) {
4167 memset(oss, 0, sizeof(stack_t));
4168 oss->ss_sp = (void __user *) t->sas_ss_sp;
4169 oss->ss_size = t->sas_ss_size;
4170 oss->ss_flags = sas_ss_flags(sp) |
4171 (current->sas_ss_flags & SS_FLAG_BITS);
4172 }
4173
4174 if (ss) {
4175 void __user *ss_sp = ss->ss_sp;
4176 size_t ss_size = ss->ss_size;
4177 unsigned ss_flags = ss->ss_flags;
4178 int ss_mode;
4179
4180 if (unlikely(on_sig_stack(sp)))
4181 return -EPERM;
4182
4183 ss_mode = ss_flags & ~SS_FLAG_BITS;
4184 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4185 ss_mode != 0))
4186 return -EINVAL;
4187
4188
4189
4190
4191
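/*
 * Return before taking any locks if no actual
 * sigaltstack changes were requested.
 */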
4192 if (t->sas_ss_sp == (unsigned long)ss_sp &&
4193 t->sas_ss_size == ss_size &&
4194 t->sas_ss_flags == ss_flags)
4195 return 0;
4196
4197 sigaltstack_lock();
4198 if (ss_mode == SS_DISABLE) {
4199 ss_size = 0;
4200 ss_sp = NULL;
4201 } else {
4202 if (unlikely(ss_size < min_ss_size))
4203 ret = -ENOMEM;
4204 if (!sigaltstack_size_valid(ss_size))
4205 ret = -ENOMEM;
4206 }
4207 if (!ret) {
4208 t->sas_ss_sp = (unsigned long) ss_sp;
4209 t->sas_ss_size = ss_size;
4210 t->sas_ss_flags = ss_flags;
4211 }
4212 sigaltstack_unlock();
4213 }
4214 return ret;
4215}
4216
4217 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4218{
4219 stack_t new, old;
4220 int err;
4221 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4222 return -EFAULT;
4223 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4224 current_user_stack_pointer(),
4225 MINSIGSTKSZ);
4226 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4227 err = -EFAULT;
4228 return err;
4229}
4230
4231int restore_altstack(const stack_t __user *uss)
4232{
4233 stack_t new;
4234 if (copy_from_user(&new, uss, sizeof(stack_t)))
4235 return -EFAULT;
4236 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4237 MINSIGSTKSZ);
4238
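/* squash all but EFAULT for now */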
4239 return 0;
4240}
4241
4242int __save_altstack(stack_t __user *uss, unsigned long sp)
4243{
4244 struct task_struct *t = current;
4245 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4246 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4247 __put_user(t->sas_ss_size, &uss->ss_size);
4248 return err;
4249}
4250
4251#ifdef CONFIG_COMPAT
4252static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4253 compat_stack_t __user *uoss_ptr)
4254{
4255 stack_t uss, uoss;
4256 int ret;
4257
4258 if (uss_ptr) {
4259 compat_stack_t uss32;
4260 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4261 return -EFAULT;
4262 uss.ss_sp = compat_ptr(uss32.ss_sp);
4263 uss.ss_flags = uss32.ss_flags;
4264 uss.ss_size = uss32.ss_size;
4265 }
4266 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4267 compat_user_stack_pointer(),
4268 COMPAT_MINSIGSTKSZ);
4269 if (ret >= 0 && uoss_ptr) {
4270 compat_stack_t old;
4271 memset(&old, 0, sizeof(old));
4272 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4273 old.ss_flags = uoss.ss_flags;
4274 old.ss_size = uoss.ss_size;
4275 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4276 ret = -EFAULT;
4277 }
4278 return ret;
4279}
4280
4281COMPAT_SYSCALL_DEFINE2(sigaltstack,
4282 const compat_stack_t __user *, uss_ptr,
4283 compat_stack_t __user *, uoss_ptr)
4284{
4285 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4286}
4287
4288int compat_restore_altstack(const compat_stack_t __user *uss)
4289{
4290 int err = do_compat_sigaltstack(uss, NULL);
4291
4292 return err == -EFAULT ? err : 0;
4293}
4294
4295int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4296{
4297 int err;
4298 struct task_struct *t = current;
4299 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4300 &uss->ss_sp) |
4301 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4302 __put_user(t->sas_ss_size, &uss->ss_size);
4303 return err;
4304}
4305#endif
4306
4307#ifdef __ARCH_WANT_SYS_SIGPENDING
4308
4309
4310
4311
4312
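/**
 *  sys_sigpending - examine pending signals
 *  @uset: where mask of pending signal is returned
 */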
4313SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4314{
4315 sigset_t set;
4316
4317 if (sizeof(old_sigset_t) > sizeof(*uset))
4318 return -EINVAL;
4319
4320 do_sigpending(&set);
4321
4322 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4323 return -EFAULT;
4324
4325 return 0;
4326}
4327
4328#ifdef CONFIG_COMPAT
4329COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4330{
4331 sigset_t set;
4332
4333 do_sigpending(&set);
4334
4335 return put_user(set.sig[0], set32);
4336}
4337#endif
4338
4339#endif
4340
4341#ifdef __ARCH_WANT_SYS_SIGPROCMASK
4342
4343
4344
4345
4346
4347
4348
4349
4350
4351
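/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */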
4352SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4353 old_sigset_t __user *, oset)
4354{
4355 old_sigset_t old_set, new_set;
4356 sigset_t new_blocked;
4357
4358 old_set = current->blocked.sig[0];
4359
4360 if (nset) {
4361 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4362 return -EFAULT;
4363
4364 new_blocked = current->blocked;
4365
4366 switch (how) {
4367 case SIG_BLOCK:
4368 sigaddsetmask(&new_blocked, new_set);
4369 break;
4370 case SIG_UNBLOCK:
4371 sigdelsetmask(&new_blocked, new_set);
4372 break;
4373 case SIG_SETMASK:
4374 new_blocked.sig[0] = new_set;
4375 break;
4376 default:
4377 return -EINVAL;
4378 }
4379
4380 set_current_blocked(&new_blocked);
4381 }
4382
4383 if (oset) {
4384 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4385 return -EFAULT;
4386 }
4387
4388 return 0;
4389}
4390#endif
4391
4392#ifndef CONFIG_ODD_RT_SIGACTION
4393
4394
4395
4396
4397
4398
4399
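/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal to be sent
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */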
4400SYSCALL_DEFINE4(rt_sigaction, int, sig,
4401 const struct sigaction __user *, act,
4402 struct sigaction __user *, oact,
4403 size_t, sigsetsize)
4404{
4405 struct k_sigaction new_sa, old_sa;
4406 int ret;
4407
4408
4409 if (sigsetsize != sizeof(sigset_t))
4410 return -EINVAL;
4411
4412 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4413 return -EFAULT;
4414
4415 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4416 if (ret)
4417 return ret;
4418
4419 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4420 return -EFAULT;
4421
4422 return 0;
4423}
4424#ifdef CONFIG_COMPAT
4425COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4426 const struct compat_sigaction __user *, act,
4427 struct compat_sigaction __user *, oact,
4428 compat_size_t, sigsetsize)
4429{
4430 struct k_sigaction new_ka, old_ka;
4431#ifdef __ARCH_HAS_SA_RESTORER
4432 compat_uptr_t restorer;
4433#endif
4434 int ret;
4435
4436
4437 if (sigsetsize != sizeof(compat_sigset_t))
4438 return -EINVAL;
4439
4440 if (act) {
4441 compat_uptr_t handler;
4442 ret = get_user(handler, &act->sa_handler);
4443 new_ka.sa.sa_handler = compat_ptr(handler);
4444#ifdef __ARCH_HAS_SA_RESTORER
4445 ret |= get_user(restorer, &act->sa_restorer);
4446 new_ka.sa.sa_restorer = compat_ptr(restorer);
4447#endif
4448 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4449 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4450 if (ret)
4451 return -EFAULT;
4452 }
4453
4454 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4455 if (!ret && oact) {
4456 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4457 &oact->sa_handler);
4458 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4459 sizeof(oact->sa_mask));
4460 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4461#ifdef __ARCH_HAS_SA_RESTORER
4462 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4463 &oact->sa_restorer);
4464#endif
4465 }
4466 return ret;
4467}
4468#endif
4469#endif
4470
4471#ifdef CONFIG_OLD_SIGACTION
4472SYSCALL_DEFINE3(sigaction, int, sig,
4473 const struct old_sigaction __user *, act,
4474 struct old_sigaction __user *, oact)
4475{
4476 struct k_sigaction new_ka, old_ka;
4477 int ret;
4478
4479 if (act) {
4480 old_sigset_t mask;
4481 if (!access_ok(act, sizeof(*act)) ||
4482 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4483 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4484 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4485 __get_user(mask, &act->sa_mask))
4486 return -EFAULT;
4487#ifdef __ARCH_HAS_KA_RESTORER
4488 new_ka.ka_restorer = NULL;
4489#endif
4490 siginitset(&new_ka.sa.sa_mask, mask);
4491 }
4492
4493 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4494
4495 if (!ret && oact) {
4496 if (!access_ok(oact, sizeof(*oact)) ||
4497 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4498 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4499 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4500 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4501 return -EFAULT;
4502 }
4503
4504 return ret;
4505}
4506#endif
4507#ifdef CONFIG_COMPAT_OLD_SIGACTION
4508COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4509 const struct compat_old_sigaction __user *, act,
4510 struct compat_old_sigaction __user *, oact)
4511{
4512 struct k_sigaction new_ka, old_ka;
4513 int ret;
4514 compat_old_sigset_t mask;
4515 compat_uptr_t handler, restorer;
4516
4517 if (act) {
4518 if (!access_ok(act, sizeof(*act)) ||
4519 __get_user(handler, &act->sa_handler) ||
4520 __get_user(restorer, &act->sa_restorer) ||
4521 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4522 __get_user(mask, &act->sa_mask))
4523 return -EFAULT;
4524
4525#ifdef __ARCH_HAS_KA_RESTORER
4526 new_ka.ka_restorer = NULL;
4527#endif
4528 new_ka.sa.sa_handler = compat_ptr(handler);
4529 new_ka.sa.sa_restorer = compat_ptr(restorer);
4530 siginitset(&new_ka.sa.sa_mask, mask);
4531 }
4532
4533 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4534
4535 if (!ret && oact) {
4536 if (!access_ok(oact, sizeof(*oact)) ||
4537 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4538 &oact->sa_handler) ||
4539 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4540 &oact->sa_restorer) ||
4541 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4542 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4543 return -EFAULT;
4544 }
4545 return ret;
4546}
4547#endif
4548
4549#ifdef CONFIG_SGETMASK_SYSCALL
4550
4551
4552
4553
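/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */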
4554SYSCALL_DEFINE0(sgetmask)
4555{
4556
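/* SMP safe */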
4557 return current->blocked.sig[0];
4558}
4559
4560SYSCALL_DEFINE1(ssetmask, int, newmask)
4561{
4562 int old = current->blocked.sig[0];
4563 sigset_t newset;
4564
4565 siginitset(&newset, newmask);
4566 set_current_blocked(&newset);
4567
4568 return old;
4569}
4570#endif
4571
4572#ifdef __ARCH_WANT_SYS_SIGNAL
4573
4574
4575
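/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */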
4576SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4577{
4578 struct k_sigaction new_sa, old_sa;
4579 int ret;
4580
4581 new_sa.sa.sa_handler = handler;
4582 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4583 sigemptyset(&new_sa.sa.sa_mask);
4584
4585 ret = do_sigaction(sig, &new_sa, &old_sa);
4586
4587 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4588}
4589#endif
4590
4591#ifdef __ARCH_WANT_SYS_PAUSE
4592
4593SYSCALL_DEFINE0(pause)
4594{
4595 while (!signal_pending(current)) {
4596 __set_current_state(TASK_INTERRUPTIBLE);
4597 schedule();
4598 }
4599 return -ERESTARTNOHAND;
4600}
4601
4602#endif
4603
4604static int sigsuspend(sigset_t *set)
4605{
4606 current->saved_sigmask = current->blocked;
4607 set_current_blocked(set);
4608
4609 while (!signal_pending(current)) {
4610 __set_current_state(TASK_INTERRUPTIBLE);
4611 schedule();
4612 }
4613 set_restore_sigmask();
4614 return -ERESTARTNOHAND;
4615}
4616
4617
4618
4619
4620
4621
4622
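/**
 *  sys_rt_sigsuspend - replace the signal mask for a value with the
 *	@unewset value until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */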
4623SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4624{
4625 sigset_t newset;
4626
4627
4628 if (sigsetsize != sizeof(sigset_t))
4629 return -EINVAL;
4630
4631 if (copy_from_user(&newset, unewset, sizeof(newset)))
4632 return -EFAULT;
4633 return sigsuspend(&newset);
4634}
4635
4636#ifdef CONFIG_COMPAT
4637COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4638{
4639 sigset_t newset;
4640
4641
4642 if (sigsetsize != sizeof(sigset_t))
4643 return -EINVAL;
4644
4645 if (get_compat_sigset(&newset, unewset))
4646 return -EFAULT;
4647 return sigsuspend(&newset);
4648}
4649#endif
4650
4651#ifdef CONFIG_OLD_SIGSUSPEND
4652SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4653{
4654 sigset_t blocked;
4655 siginitset(&blocked, mask);
4656 return sigsuspend(&blocked);
4657}
4658#endif
4659#ifdef CONFIG_OLD_SIGSUSPEND3
4660SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4661{
4662 sigset_t blocked;
4663 siginitset(&blocked, mask);
4664 return sigsuspend(&blocked);
4665}
4666#endif
4667
4668__weak const char *arch_vma_name(struct vm_area_struct *vma)
4669{
4670 return NULL;
4671}
4672
4673static inline void siginfo_buildtime_checks(void)
4674{
4675 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4676
4677
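/* Verify the offsets in the two siginfos match */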
4678#define CHECK_OFFSET(field) \
4679 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4680
4681
4682 CHECK_OFFSET(si_pid);
4683 CHECK_OFFSET(si_uid);
4684
4685
4686 CHECK_OFFSET(si_tid);
4687 CHECK_OFFSET(si_overrun);
4688 CHECK_OFFSET(si_value);
4689
4690
4691 CHECK_OFFSET(si_pid);
4692 CHECK_OFFSET(si_uid);
4693 CHECK_OFFSET(si_value);
4694
4695
4696 CHECK_OFFSET(si_pid);
4697 CHECK_OFFSET(si_uid);
4698 CHECK_OFFSET(si_status);
4699 CHECK_OFFSET(si_utime);
4700 CHECK_OFFSET(si_stime);
4701
4702
4703 CHECK_OFFSET(si_addr);
4704 CHECK_OFFSET(si_trapno);
4705 CHECK_OFFSET(si_addr_lsb);
4706 CHECK_OFFSET(si_lower);
4707 CHECK_OFFSET(si_upper);
4708 CHECK_OFFSET(si_pkey);
4709 CHECK_OFFSET(si_perf_data);
4710 CHECK_OFFSET(si_perf_type);
4711
4712
4713 CHECK_OFFSET(si_band);
4714 CHECK_OFFSET(si_fd);
4715
4716
4717 CHECK_OFFSET(si_call_addr);
4718 CHECK_OFFSET(si_syscall);
4719 CHECK_OFFSET(si_arch);
4720#undef CHECK_OFFSET
4721
4722
4723 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4724 offsetof(struct siginfo, si_addr));
4725 if (sizeof(int) == sizeof(void __user *)) {
4726 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4727 sizeof(void __user *));
4728 } else {
4729 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4730 sizeof_field(struct siginfo, si_uid)) !=
4731 sizeof(void __user *));
4732 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4733 offsetof(struct siginfo, si_uid));
4734 }
4735#ifdef CONFIG_COMPAT
4736 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4737 offsetof(struct compat_siginfo, si_addr));
4738 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4739 sizeof(compat_uptr_t));
4740 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4741 sizeof_field(struct siginfo, si_pid));
4742#endif
4743}
4744
4745void __init signals_init(void)
4746{
4747 siginfo_buildtime_checks();
4748
4749 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4750}
4751
4752#ifdef CONFIG_KGDB_KDB
4753#include <linux/kdb.h>
4754
4755
4756
4757
4758
4759
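/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */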
4760void kdb_send_sig(struct task_struct *t, int sig)
4761{
4762 static struct task_struct *kdb_prev_t;
4763 int new_t, ret;
4764 if (!spin_trylock(&t->sighand->siglock)) {
4765 kdb_printf("Can't do kill command now.\n"
4766 "The sigmask lock is held somewhere else in "
4767 "kernel, try again later\n");
4768 return;
4769 }
4770 new_t = kdb_prev_t != t;
4771 kdb_prev_t = t;
4772 if (!task_is_running(t) && new_t) {
4773 spin_unlock(&t->sighand->siglock);
4774 kdb_printf("Process is not RUNNING, sending a signal from "
4775 "kdb risks deadlock\n"
4776 "on the run queue locks. "
4777 "The signal has _not_ been sent.\n"
4778 "Reissue the kill command if you want to risk "
4779 "the deadlock.\n");
4780 return;
4781 }
4782 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4783 spin_unlock(&t->sighand->siglock);
4784 if (ret)
4785 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4786 sig, t->pid);
4787 else
4788 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4789}
4790#endif
4791