// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to make signals SMP safe.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
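
/*
 * SLAB caches for signal bits.
 */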
static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals,
	 * except for SIGKILL which can't be reported anyway.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous if called on current.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
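
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, any existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */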
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	/* Have the new thread join an on-going signal group stop */
	unsigned long jobctl = current->jobctl;
	if (jobctl & JOBCTL_STOP_PENDING) {
		struct signal_struct *sig = current->signal;
		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		if (task_set_jobctl_pending(task, signr | gstop)) {
			sig->group_stop_count++;
		}
	}
}
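
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */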
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
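
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This is the
 * actual signal that caused the stop, but the signal may be delivered
 * to another thread.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */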
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
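
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */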
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;

			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP is special or has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
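
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */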
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}
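
/*
 * Send a signal carrying a sigval payload on behalf of USB asyncio.
 * The historical USB asyncio ABI stores the sigval over the si_pid
 * and si_uid fields (see the cast onto &info.si_pid below), so this
 * helper does not fill in si_pid/si_uid and instead checks permissions
 * against the credentials supplied by the caller.
 */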
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */
int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}
int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(&info);
}
#endif

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
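
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */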
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queue just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}
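
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */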
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise we
	 * must wait for ->core_state to be set.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
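
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */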
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Any trap clears pending group stop and participates in it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		cgroup_enter_frozen();
		freezable_schedule();
		cgroup_leave_frozen(true);
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
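
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * places afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */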
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * group stops when !ptraced.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		cgroup_enter_frozen();
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}

/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}

/**
 * do_freezer_trap - handle the freezer jobctl trap
 *
 * Puts the task into frozen state, if only the task is not about to quit.
 * In this case it drops JOBCTL_TRAP_FREEZE.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held,
 * which is always released before returning.
 */
static void do_freezer_trap(void)
	__releases(&current->sighand->siglock)
{
	/*
	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
	 * let's make another loop to give it a chance to be handled.
	 * In any case, we'll return back.
	 */
	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
	     JOBCTL_TRAP_FREEZE) {
		spin_unlock_irq(&current->sighand->siglock);
		return;
	}

	/*
	 * Now we're sure that there is no pending trap, and the task can
	 * be frozen.  Clear TIF_SIGPENDING so that the interruptible sleep
	 * below is not cut short immediately, then enter the freezer.
	 */
	__set_current_state(TASK_INTERRUPTIBLE);
	clear_thread_flag(TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
	cgroup_enter_frozen();
	freezable_schedule();
}
static int ptrace_signal(int signr, kernel_siginfo_t *info)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		clear_siginfo(info);
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		send_signal(signr, info, current, PIDTYPE_PID);
		signr = 0;
	}

	return signr;
}
bool get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	if (unlikely(uprobe_deny_signal()))
		return false;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	/* Has this task already been marked for death? */
	if (signal_group_exit(signal)) {
		ksig->info.si_signo = signr = SIGKILL;
		sigdelset(&current->pending.signal, SIGKILL);
		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
				&sighand->action[SIGKILL - 1]);
		recalc_sigpending();
		goto fatal;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl &
			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
			if (current->jobctl & JOBCTL_TRAP_MASK) {
				do_jobctl_trap();
				spin_unlock_irq(&sighand->siglock);
			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
				do_freezer_trap();

			goto relock;
		}

		/*
		 * If the task is leaving the frozen state, let's update
		 * cgroup counters and reset the frozen bit.
		 */
		if (unlikely(cgroup_task_frozen(current))) {
			spin_unlock_irq(&sighand->siglock);
			cgroup_leave_frozen(false);
			goto relock;
		}

		/*
		 * Signals generated by the execution of an instruction
		 * need to be delivered before any other pending signals
		 * so that the instruction pointer in the signal stack
		 * frame points to the faulting instruction.
		 */
		signr = dequeue_synchronous_signal(&ksig->info);
		if (!signr)
			signr = dequeue_signal(current, &current->blocked, &ksig->info);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

	fatal:
		spin_unlock_irq(&sighand->siglock);
		if (unlikely(cgroup_task_frozen(current)))
			cgroup_leave_frozen(true);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * them all.  So lights out for this thread, and
			 * lets the rest of the group die off quickly.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}
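
/**
 * signal_delivered - called after a signal has been delivered
 * @ksig:		kernel signal struct
 * @stepping:		nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
 */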
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn.  So we can
	   simply clear the restore sigmask flag.  */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	tracehook_signal_handler(stepping);
}

void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
	if (failed)
		force_sigsegv(ksig->sig);
	else
		signal_delivered(ksig, stepping);
}

/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}

/*
 * System call entry points.
 */

/**
 *  sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}
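
/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */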
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}

void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by other task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
2919
2920
2921
2922
2923
2924
2925
2926
2927int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2928{
2929 struct task_struct *tsk = current;
2930 sigset_t newset;
2931
2932
2933 if (oldset)
2934 *oldset = tsk->blocked;
2935
2936 switch (how) {
2937 case SIG_BLOCK:
2938 sigorsets(&newset, &tsk->blocked, set);
2939 break;
2940 case SIG_UNBLOCK:
2941 sigandnsets(&newset, &tsk->blocked, set);
2942 break;
2943 case SIG_SETMASK:
2944 newset = *set;
2945 break;
2946 default:
2947 return -EINVAL;
2948 }
2949
2950 __set_current_blocked(&newset);
2951 return 0;
2952}
2953EXPORT_SYMBOL(sigprocmask);
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
2965{
2966 sigset_t kmask;
2967
2968 if (!umask)
2969 return 0;
2970 if (sigsetsize != sizeof(sigset_t))
2971 return -EINVAL;
2972 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
2973 return -EFAULT;
2974
2975 set_restore_sigmask();
2976 current->saved_sigmask = current->blocked;
2977 set_current_blocked(&kmask);
2978
2979 return 0;
2980}
2981
2982#ifdef CONFIG_COMPAT
2983int set_compat_user_sigmask(const compat_sigset_t __user *umask,
2984 size_t sigsetsize)
2985{
2986 sigset_t kmask;
2987
2988 if (!umask)
2989 return 0;
2990 if (sigsetsize != sizeof(compat_sigset_t))
2991 return -EINVAL;
2992 if (get_compat_sigset(&kmask, umask))
2993 return -EFAULT;
2994
2995 set_restore_sigmask();
2996 current->saved_sigmask = current->blocked;
2997 set_current_blocked(&kmask);
2998
2999 return 0;
3000}
3001#endif
3002
3003
3004
3005
3006
3007
3008
3009
3010SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3011 sigset_t __user *, oset, size_t, sigsetsize)
3012{
3013 sigset_t old_set, new_set;
3014 int error;
3015
3016
3017 if (sigsetsize != sizeof(sigset_t))
3018 return -EINVAL;
3019
3020 old_set = current->blocked;
3021
3022 if (nset) {
3023 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3024 return -EFAULT;
3025 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3026
3027 error = sigprocmask(how, &new_set, NULL);
3028 if (error)
3029 return error;
3030 }
3031
3032 if (oset) {
3033 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3034 return -EFAULT;
3035 }
3036
3037 return 0;
3038}
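
/*
 * Illustrative userspace sketch (not part of this file): the glibc
 * sigprocmask() wrapper ends up in sys_rt_sigprocmask() above.  Blocking
 * a signal around a critical section looks like:
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	// SIGINT now held pending
 *	// ... critical section ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore previous mask
 *
 * Note that SIGKILL and SIGSTOP are silently dropped from the new mask by
 * the sigdelsetmask() call above rather than rejected, so they can never
 * be blocked.
 */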
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		sigset_t new_set;
		int error;
		if (get_compat_sigset(&new_set, nset))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
}
#endif

static void do_sigpending(sigset_t *set)
{
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(set, &current->blocked, set);
}

/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @uset: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sigsetsize))
		return -EFAULT;

	return 0;
}
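
/*
 * Illustrative userspace sketch (not part of this file): sigpending()
 * reports signals that were raised while blocked, which is exactly the
 * "pending and blocked" set computed by do_sigpending() above:
 *
 *	sigset_t pending;
 *
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGTERM))
 *		// SIGTERM arrived while blocked and is still queued
 */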
3096
3097#ifdef CONFIG_COMPAT
3098COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3099 compat_size_t, sigsetsize)
3100{
3101 sigset_t set;
3102
3103 if (sigsetsize > sizeof(*uset))
3104 return -EINVAL;
3105
3106 do_sigpending(&set);
3107
3108 return put_compat_sigset(uset, &set, sigsetsize);
3109}
3110#endif
3111
3112static const struct {
3113 unsigned char limit, layout;
3114} sig_sicodes[] = {
3115 [SIGILL] = { NSIGILL, SIL_FAULT },
3116 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3117 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3118 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3119 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3120#if defined(SIGEMT)
3121 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3122#endif
3123 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3124 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3125 [SIGSYS] = { NSIGSYS, SIL_SYS },
3126};
3127
3128static bool known_siginfo_layout(unsigned sig, int si_code)
3129{
3130 if (si_code == SI_KERNEL)
3131 return true;
3132 else if ((si_code > SI_USER)) {
3133 if (sig_specific_sicodes(sig)) {
3134 if (si_code <= sig_sicodes[sig].limit)
3135 return true;
3136 }
3137 else if (si_code <= NSIGPOLL)
3138 return true;
3139 }
3140 else if (si_code >= SI_DETHREAD)
3141 return true;
3142 else if (si_code == SI_ASYNCNL)
3143 return true;
3144 return false;
3145}
3146
3147enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3148{
3149 enum siginfo_layout layout = SIL_KILL;
3150 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3151 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3152 (si_code <= sig_sicodes[sig].limit)) {
3153 layout = sig_sicodes[sig].layout;
3154
3155 if ((sig == SIGBUS) &&
3156 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3157 layout = SIL_FAULT_MCEERR;
3158 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3159 layout = SIL_FAULT_BNDERR;
3160#ifdef SEGV_PKUERR
3161 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3162 layout = SIL_FAULT_PKUERR;
3163#endif
3164 }
3165 else if (si_code <= NSIGPOLL)
3166 layout = SIL_POLL;
3167 } else {
3168 if (si_code == SI_TIMER)
3169 layout = SIL_TIMER;
3170 else if (si_code == SI_SIGIO)
3171 layout = SIL_POLL;
3172 else if (si_code < 0)
3173 layout = SIL_RT;
3174 }
3175 return layout;
3176}
3177
3178static inline char __user *si_expansion(const siginfo_t __user *info)
3179{
3180 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3181}
3182
3183int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3184{
3185 char __user *expansion = si_expansion(to);
3186 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3187 return -EFAULT;
3188 if (clear_user(expansion, SI_EXPANSION_SIZE))
3189 return -EFAULT;
3190 return 0;
3191}
3192
3193static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3194 const siginfo_t __user *from)
3195{
3196 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3197 char __user *expansion = si_expansion(from);
3198 char buf[SI_EXPANSION_SIZE];
3199 int i;
3200
3201
3202
3203
3204
3205
3206 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3207 return -EFAULT;
3208 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3209 if (buf[i] != 0)
3210 return -E2BIG;
3211 }
3212 }
3213 return 0;
3214}
3215
3216static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3217 const siginfo_t __user *from)
3218{
3219 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3220 return -EFAULT;
3221 to->si_signo = signo;
3222 return post_copy_siginfo_from_user(to, from);
3223}
3224
3225int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3226{
3227 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3228 return -EFAULT;
3229 return post_copy_siginfo_from_user(to, from);
3230}
3231
3232#ifdef CONFIG_COMPAT
3233int copy_siginfo_to_user32(struct compat_siginfo __user *to,
3234 const struct kernel_siginfo *from)
3235#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
3236{
3237 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
3238}
3239int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3240 const struct kernel_siginfo *from, bool x32_ABI)
3241#endif
3242{
3243 struct compat_siginfo new;
3244 memset(&new, 0, sizeof(new));
3245
3246 new.si_signo = from->si_signo;
3247 new.si_errno = from->si_errno;
3248 new.si_code = from->si_code;
3249 switch(siginfo_layout(from->si_signo, from->si_code)) {
3250 case SIL_KILL:
3251 new.si_pid = from->si_pid;
3252 new.si_uid = from->si_uid;
3253 break;
3254 case SIL_TIMER:
3255 new.si_tid = from->si_tid;
3256 new.si_overrun = from->si_overrun;
3257 new.si_int = from->si_int;
3258 break;
3259 case SIL_POLL:
3260 new.si_band = from->si_band;
3261 new.si_fd = from->si_fd;
3262 break;
3263 case SIL_FAULT:
3264 new.si_addr = ptr_to_compat(from->si_addr);
3265#ifdef __ARCH_SI_TRAPNO
3266 new.si_trapno = from->si_trapno;
3267#endif
3268 break;
3269 case SIL_FAULT_MCEERR:
3270 new.si_addr = ptr_to_compat(from->si_addr);
3271#ifdef __ARCH_SI_TRAPNO
3272 new.si_trapno = from->si_trapno;
3273#endif
3274 new.si_addr_lsb = from->si_addr_lsb;
3275 break;
3276 case SIL_FAULT_BNDERR:
3277 new.si_addr = ptr_to_compat(from->si_addr);
3278#ifdef __ARCH_SI_TRAPNO
3279 new.si_trapno = from->si_trapno;
3280#endif
3281 new.si_lower = ptr_to_compat(from->si_lower);
3282 new.si_upper = ptr_to_compat(from->si_upper);
3283 break;
3284 case SIL_FAULT_PKUERR:
3285 new.si_addr = ptr_to_compat(from->si_addr);
3286#ifdef __ARCH_SI_TRAPNO
3287 new.si_trapno = from->si_trapno;
3288#endif
3289 new.si_pkey = from->si_pkey;
3290 break;
3291 case SIL_CHLD:
3292 new.si_pid = from->si_pid;
3293 new.si_uid = from->si_uid;
3294 new.si_status = from->si_status;
3295#ifdef CONFIG_X86_X32_ABI
3296 if (x32_ABI) {
3297 new._sifields._sigchld_x32._utime = from->si_utime;
3298 new._sifields._sigchld_x32._stime = from->si_stime;
3299 } else
3300#endif
3301 {
3302 new.si_utime = from->si_utime;
3303 new.si_stime = from->si_stime;
3304 }
3305 break;
3306 case SIL_RT:
3307 new.si_pid = from->si_pid;
3308 new.si_uid = from->si_uid;
3309 new.si_int = from->si_int;
3310 break;
3311 case SIL_SYS:
3312 new.si_call_addr = ptr_to_compat(from->si_call_addr);
3313 new.si_syscall = from->si_syscall;
3314 new.si_arch = from->si_arch;
3315 break;
3316 }
3317
3318 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3319 return -EFAULT;
3320
3321 return 0;
3322}
3323
3324static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3325 const struct compat_siginfo *from)
3326{
3327 clear_siginfo(to);
3328 to->si_signo = from->si_signo;
3329 to->si_errno = from->si_errno;
3330 to->si_code = from->si_code;
3331 switch(siginfo_layout(from->si_signo, from->si_code)) {
3332 case SIL_KILL:
3333 to->si_pid = from->si_pid;
3334 to->si_uid = from->si_uid;
3335 break;
3336 case SIL_TIMER:
3337 to->si_tid = from->si_tid;
3338 to->si_overrun = from->si_overrun;
3339 to->si_int = from->si_int;
3340 break;
3341 case SIL_POLL:
3342 to->si_band = from->si_band;
3343 to->si_fd = from->si_fd;
3344 break;
3345 case SIL_FAULT:
3346 to->si_addr = compat_ptr(from->si_addr);
3347#ifdef __ARCH_SI_TRAPNO
3348 to->si_trapno = from->si_trapno;
3349#endif
3350 break;
3351 case SIL_FAULT_MCEERR:
3352 to->si_addr = compat_ptr(from->si_addr);
3353#ifdef __ARCH_SI_TRAPNO
3354 to->si_trapno = from->si_trapno;
3355#endif
3356 to->si_addr_lsb = from->si_addr_lsb;
3357 break;
3358 case SIL_FAULT_BNDERR:
3359 to->si_addr = compat_ptr(from->si_addr);
3360#ifdef __ARCH_SI_TRAPNO
3361 to->si_trapno = from->si_trapno;
3362#endif
3363 to->si_lower = compat_ptr(from->si_lower);
3364 to->si_upper = compat_ptr(from->si_upper);
3365 break;
3366 case SIL_FAULT_PKUERR:
3367 to->si_addr = compat_ptr(from->si_addr);
3368#ifdef __ARCH_SI_TRAPNO
3369 to->si_trapno = from->si_trapno;
3370#endif
3371 to->si_pkey = from->si_pkey;
3372 break;
3373 case SIL_CHLD:
3374 to->si_pid = from->si_pid;
3375 to->si_uid = from->si_uid;
3376 to->si_status = from->si_status;
3377#ifdef CONFIG_X86_X32_ABI
3378 if (in_x32_syscall()) {
3379 to->si_utime = from->_sifields._sigchld_x32._utime;
3380 to->si_stime = from->_sifields._sigchld_x32._stime;
3381 } else
3382#endif
3383 {
3384 to->si_utime = from->si_utime;
3385 to->si_stime = from->si_stime;
3386 }
3387 break;
3388 case SIL_RT:
3389 to->si_pid = from->si_pid;
3390 to->si_uid = from->si_uid;
3391 to->si_int = from->si_int;
3392 break;
3393 case SIL_SYS:
3394 to->si_call_addr = compat_ptr(from->si_call_addr);
3395 to->si_syscall = from->si_syscall;
3396 to->si_arch = from->si_arch;
3397 break;
3398 }
3399 return 0;
3400}
3401
3402static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3403 const struct compat_siginfo __user *ufrom)
3404{
3405 struct compat_siginfo from;
3406
3407 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3408 return -EFAULT;
3409
3410 from.si_signo = signo;
3411 return post_copy_siginfo_from_user32(to, &from);
3412}
3413
3414int copy_siginfo_from_user32(struct kernel_siginfo *to,
3415 const struct compat_siginfo __user *ufrom)
3416{
3417 struct compat_siginfo from;
3418
3419 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3420 return -EFAULT;
3421
3422 return post_copy_siginfo_from_user32(to, &from);
3423}
3424#endif
3425
3426
3427
3428
3429
3430
3431
3432static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3433 const struct timespec64 *ts)
3434{
3435 ktime_t *to = NULL, timeout = KTIME_MAX;
3436 struct task_struct *tsk = current;
3437 sigset_t mask = *which;
3438 int sig, ret = 0;
3439
3440 if (ts) {
3441 if (!timespec64_valid(ts))
3442 return -EINVAL;
3443 timeout = timespec64_to_ktime(*ts);
3444 to = &timeout;
3445 }
3446
3447
3448
3449
3450 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3451 signotset(&mask);
3452
3453 spin_lock_irq(&tsk->sighand->siglock);
3454 sig = dequeue_signal(tsk, &mask, info);
3455 if (!sig && timeout) {
3456
3457
3458
3459
3460
3461
3462 tsk->real_blocked = tsk->blocked;
3463 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3464 recalc_sigpending();
3465 spin_unlock_irq(&tsk->sighand->siglock);
3466
3467 __set_current_state(TASK_INTERRUPTIBLE);
3468 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3469 HRTIMER_MODE_REL);
3470 spin_lock_irq(&tsk->sighand->siglock);
3471 __set_task_blocked(tsk, &tsk->real_blocked);
3472 sigemptyset(&tsk->real_blocked);
3473 sig = dequeue_signal(tsk, &mask, info);
3474 }
3475 spin_unlock_irq(&tsk->sighand->siglock);
3476
3477 if (sig)
3478 return sig;
3479 return ret ? -EINTR : -EAGAIN;
3480}
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3491 siginfo_t __user *, uinfo,
3492 const struct __kernel_timespec __user *, uts,
3493 size_t, sigsetsize)
3494{
3495 sigset_t these;
3496 struct timespec64 ts;
3497 kernel_siginfo_t info;
3498 int ret;
3499
3500
3501 if (sigsetsize != sizeof(sigset_t))
3502 return -EINVAL;
3503
3504 if (copy_from_user(&these, uthese, sizeof(these)))
3505 return -EFAULT;
3506
3507 if (uts) {
3508 if (get_timespec64(&ts, uts))
3509 return -EFAULT;
3510 }
3511
3512 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3513
3514 if (ret > 0 && uinfo) {
3515 if (copy_siginfo_to_user(uinfo, &info))
3516 ret = -EFAULT;
3517 }
3518
3519 return ret;
3520}
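
/*
 * Illustrative userspace sketch (not part of this file): synchronous
 * signal handling with sigtimedwait(), which lands in do_sigtimedwait()
 * above.  The signals of interest must be blocked first, or they may be
 * delivered asynchronously instead of being picked up here:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		// si.si_pid identifies the sender
 *	// -1 with errno == EAGAIN means the 5s timeout expired
 */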

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct old_timespec32 __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif
#endif

static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
{
	clear_siginfo(info);
	info->si_signo = sig;
	info->si_errno = 0;
	info->si_code = SI_USER;
	info->si_pid = task_tgid_vnr(current);
	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
}

/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct kernel_siginfo info;

	prepare_kill_siginfo(sig, &info);

	return kill_something_info(sig, &info, pid);
}
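
/*
 * Illustrative userspace sketch (not part of this file): kill(2) accepts
 * a positive pid, a negative pgid, 0 ("my process group") or -1
 * ("everything I may signal"), all decoded by kill_something_info():
 *
 *	kill(1234, SIGTERM);	// one process
 *	kill(-1234, SIGTERM);	// every process in process group 1234
 *	kill(1234, 0);		// existence/permission probe, nothing sent
 */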

/*
 * Verify that the signaler and signalee either are in the same pid namespace
 * or that the signaler's pid namespace is an ancestor of the signalee's pid
 * namespace.
 */
static bool access_pidfd_pidns(struct pid *pid)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *p = ns_of_pid(pid);

	for (;;) {
		if (!p)
			return false;
		if (p == active)
			break;
		p = p->parent;
	}

	return true;
}

static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
				      siginfo_t __user *info)
{
#ifdef CONFIG_COMPAT
	/*
	 * Avoid hooking up compat syscalls and instead handle necessary
	 * conversions here. Note, this is a stop-gap measure and should not be
	 * considered a generic solution.
	 */
	if (in_compat_syscall())
		return copy_siginfo_from_user32(
			kinfo, (struct compat_siginfo __user *)info);
#endif
	return copy_siginfo_from_user(kinfo, info);
}

static struct pid *pidfd_to_pid(const struct file *file)
{
	if (file->f_op == &pidfd_fops)
		return file->private_data;

	return tgid_pidfd_to_pid(file);
}

/**
 * sys_pidfd_send_signal - Signal a process through a pidfd
 * @pidfd:  file descriptor of the process
 * @sig:    signal to send
 * @info:   signal info
 * @flags:  future flags
 *
 * The syscall currently only signals via PIDTYPE_PID which covers
 * kill(<positive-pid>, <signal>). It does not signal threads or process
 * groups.
 * In order to extend the syscall to threads and process groups the @flags
 * argument should be used. In essence, the @flags argument will determine
 * what is signaled and not the file descriptor itself. Put in other words,
 * grouping is a property of the flags argument not a property of the file
 * descriptor.
 *
 * Return: 0 on success, negative errno on failure
 */
SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
		siginfo_t __user *, info, unsigned int, flags)
{
	int ret;
	struct fd f;
	struct pid *pid;
	kernel_siginfo_t kinfo;

	/* Enforce flags be set to 0 until we add an extension. */
	if (flags)
		return -EINVAL;

	f = fdget(pidfd);
	if (!f.file)
		return -EBADF;

	/* Is this a pidfd? */
	pid = pidfd_to_pid(f.file);
	if (IS_ERR(pid)) {
		ret = PTR_ERR(pid);
		goto err;
	}

	ret = -EINVAL;
	if (!access_pidfd_pidns(pid))
		goto err;

	if (info) {
		ret = copy_siginfo_from_user_any(&kinfo, info);
		if (unlikely(ret))
			goto err;

		ret = -EINVAL;
		if (unlikely(sig != kinfo.si_signo))
			goto err;

		/* Only allow sending arbitrary signals to yourself. */
		ret = -EPERM;
		if ((task_pid(current) != pid) &&
		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
			goto err;
	} else {
		prepare_kill_siginfo(sig, &kinfo);
	}

	ret = kill_pid_info(sig, &kinfo, pid);

err:
	fdput(f);
	return ret;
}
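
/*
 * Illustrative userspace sketch (not part of this file): pidfd_send_signal()
 * avoids the classic kill() pid-reuse race because the open pidfd pins the
 * struct pid.  On kernels that also provide pidfd_open(), and lacking libc
 * wrappers, syscall(2) is used directly:
 *
 *	#include <sys/syscall.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	if (pidfd >= 0) {
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 */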

static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process
		 * existence probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks @tgid and returns -ESRCH if the PID exists
 *  but is not in that thread group, which guards against a recycled
 *  thread ID signaling the wrong task.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, info, pid);
}

/**
 *  sys_rt_sigqueueinfo - send signal information to a signal
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
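
/*
 * Illustrative userspace sketch (not part of this file): the glibc
 * sigqueue() wrapper reaches this syscall with si_code = SI_QUEUE and a
 * caller-chosen payload that an SA_SIGINFO handler sees in si_value:
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, v);
 *
 * The si_code >= 0 / SI_TKILL check in do_rt_sigqueueinfo() above is what
 * stops an unprivileged caller from forging kernel-generated siginfo for
 * another process.
 */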

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif

static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif

/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);

void __weak sigaction_compat_abi(struct k_sigaction *act,
				 struct k_sigaction *oact)
{
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	sigaction_compat_abi(act, oact);

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
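
/*
 * Illustrative userspace sketch (not part of this file): sigaction(2) is
 * the userspace entry into do_sigaction().  A typical SA_SIGINFO handler
 * installation:
 *
 *	static void handler(int sig, siginfo_t *si, void *ucontext) { ... }
 *
 *	struct sigaction sa = { 0 };
 *
 *	sa.sa_sigaction = handler;
 *	sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 *
 * Installing SIG_IGN (or SIG_DFL for a default-ignore signal such as
 * SIGCHLD) discards already-pending instances, per the POSIX rule quoted
 * above.
 */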

static int
do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
		size_t min_ss_size)
{
	struct task_struct *t = current;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		/* Changing the stack we're running on is not allowed. */
		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				return -ENOMEM;
		}

		t->sas_ss_sp = (unsigned long) ss_sp;
		t->sas_ss_size = ss_size;
		t->sas_ss_flags = ss_flags;
	}
	return 0;
}

SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
{
	stack_t new, old;
	int err;
	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			     current_user_stack_pointer(),
			     MINSIGSTKSZ);
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}
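
/*
 * Illustrative userspace sketch (not part of this file): an alternate
 * signal stack is mainly useful for catching stack-overflow SIGSEGVs,
 * since the normal stack is unusable at that point.  It pairs with the
 * SA_ONSTACK sigaction flag:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = segv_handler;
 *	sa.sa_flags = SA_ONSTACK;
 *	sigaction(SIGSEGV, &sa, NULL);
 *
 * do_sigaltstack() rejects changes made while running on the stack
 * (-EPERM) and undersized stacks (-ENOMEM when ss_size < MINSIGSTKSZ).
 */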

int restore_altstack(const stack_t __user *uss)
{
	stack_t new;
	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
	/* squash all but EFAULT for now */
	return 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}

#ifdef CONFIG_COMPAT
static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
				 compat_stack_t __user *uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	if (ret >= 0 && uoss_ptr)  {
		compat_stack_t old;
		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}

COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = do_compat_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
#endif

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @uset: where the mask of pending signals is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
	sigset_t set;

	if (sizeof(old_sigset_t) > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;

	do_sigpending(&set);

	return put_user(set.sig[0], set32);
}
#endif

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK

/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */
SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal to be sent
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
		return -EFAULT;

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
	if (ret)
		return ret;

	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
		return -EFAULT;

	return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 *  sys_rt_sigsuspend - replace the signal mask with the
 *	@unewset value until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
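
/*
 * Illustrative userspace sketch (not part of this file): sigsuspend(2)
 * atomically replaces the mask and sleeps, closing the window between
 * "unblock" and "wait" that a sigprocmask()+pause() pair would leave:
 *
 *	sigset_t waitmask;
 *
 *	sigfillset(&waitmask);
 *	sigdelset(&waitmask, SIGUSR1);	// wait for SIGUSR1 only
 *	sigsuspend(&waitmask);		// returns -1/EINTR after the handler runs
 *
 * The caller's original mask is restored on return via the saved_sigmask
 * logic above.
 */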

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets of the fields in the uapi and kernel siginfo agree */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET

	/* usb asyncio */
	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
		     offsetof(struct siginfo, si_addr));
	if (sizeof(int) == sizeof(void __user *)) {
		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
			     sizeof(void __user *));
	} else {
		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
			      sizeof_field(struct siginfo, si_uid)) !=
			     sizeof(void __user *));
		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
			     offsetof(struct siginfo, si_uid));
	}
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
		     offsetof(struct compat_siginfo, si_addr));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof(compat_uptr_t));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof_field(struct siginfo, si_pid));
#endif
}

void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */